/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/evergreen_cs.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <dev/drm2/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))

struct evergreen_cs_track {
    u32 group_size;
    u32 nbanks;
    u32 npipes;
    u32 row_size;
    /* value we track */
    u32 nsamples; /* unused */
    struct radeon_bo *cb_color_bo[12];
    u32 cb_color_bo_offset[12];
    struct radeon_bo *cb_color_fmask_bo[8]; /* unused */
    struct radeon_bo *cb_color_cmask_bo[8]; /* unused */
    u32 cb_color_info[12];
    u32 cb_color_view[12];
    u32 cb_color_pitch[12];
    u32 cb_color_slice[12];
    u32 cb_color_slice_idx[12];
    u32 cb_color_attrib[12];
    u32 cb_color_cmask_slice[8]; /* unused */
    u32 cb_color_fmask_slice[8]; /* unused */
    u32 cb_target_mask;
    u32 cb_shader_mask; /* unused */
    u32 vgt_strmout_config;
    u32 vgt_strmout_buffer_config;
    struct radeon_bo *vgt_strmout_bo[4];
    u32 vgt_strmout_bo_offset[4];
    u32 vgt_strmout_size[4];
    u32 db_depth_control;
    u32 db_depth_view;
    u32 db_depth_slice;
    u32 db_depth_size;
    u32 db_z_info;
    u32 db_z_read_offset;
    u32 db_z_write_offset;
    struct radeon_bo *db_z_read_bo;
    struct radeon_bo *db_z_write_bo;
    u32 db_s_info;
    u32 db_s_read_offset;
    u32 db_s_write_offset;
    struct radeon_bo *db_s_read_bo;
    struct radeon_bo *db_s_write_bo;
    bool sx_misc_kill_all_prims;
    bool cb_dirty;
    bool db_dirty;
    bool streamout_dirty;
    u32 htile_offset;
    u32 htile_surface;
    struct radeon_bo *htile_bo;
};
static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
{
    if (tiling_flags & RADEON_TILING_MACRO)
        return ARRAY_2D_TILED_THIN1;
    else if (tiling_flags & RADEON_TILING_MICRO)
        return ARRAY_1D_TILED_THIN1;
    else
        return ARRAY_LINEAR_GENERAL;
}

static u32 evergreen_cs_get_num_banks(u32 nbanks)
{
    switch (nbanks) {
    case 2:
        return ADDR_SURF_2_BANK;
    case 4:
        return ADDR_SURF_4_BANK;
    case 8:
    default:
        return ADDR_SURF_8_BANK;
    case 16:
        return ADDR_SURF_16_BANK;
    }
}
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
    int i;

    for (i = 0; i < 8; i++) {
        track->cb_color_fmask_bo[i] = NULL;
        track->cb_color_cmask_bo[i] = NULL;
        track->cb_color_cmask_slice[i] = 0;
        track->cb_color_fmask_slice[i] = 0;
    }

    for (i = 0; i < 12; i++) {
        track->cb_color_bo[i] = NULL;
        track->cb_color_bo_offset[i] = 0xFFFFFFFF;
        track->cb_color_info[i] = 0;
        track->cb_color_view[i] = 0xFFFFFFFF;
        track->cb_color_pitch[i] = 0;
        track->cb_color_slice[i] = 0xfffffff;
        track->cb_color_slice_idx[i] = 0;
    }
    track->cb_target_mask = 0xFFFFFFFF;
    track->cb_shader_mask = 0xFFFFFFFF;
    track->cb_dirty = true;

    track->db_depth_slice = 0xffffffff;
    track->db_depth_view = 0xFFFFC000;
    track->db_depth_size = 0xFFFFFFFF;
    track->db_depth_control = 0xFFFFFFFF;
    track->db_z_info = 0xFFFFFFFF;
    track->db_z_read_offset = 0xFFFFFFFF;
    track->db_z_write_offset = 0xFFFFFFFF;
    track->db_z_read_bo = NULL;
    track->db_z_write_bo = NULL;
    track->db_s_info = 0xFFFFFFFF;
    track->db_s_read_offset = 0xFFFFFFFF;
    track->db_s_write_offset = 0xFFFFFFFF;
    track->db_s_read_bo = NULL;
    track->db_s_write_bo = NULL;
    track->db_dirty = true;
    track->htile_bo = NULL;
    track->htile_offset = 0xFFFFFFFF;
    track->htile_surface = 0;

    for (i = 0; i < 4; i++) {
        track->vgt_strmout_size[i] = 0;
        track->vgt_strmout_bo[i] = NULL;
        track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
    }

    track->streamout_dirty = true;
    track->sx_misc_kill_all_prims = false;
}
struct eg_surface {
    /* value gathered from cs */
    unsigned nbx;
    unsigned nby;
    unsigned format;
    unsigned mode;
    unsigned nbanks;
    unsigned bankw;
    unsigned bankh;
    unsigned tsplit;
    unsigned mtilea;
    unsigned nsamples;
    /* output value */
    unsigned bpe; /* bytes per element */
    unsigned layer_size;
    unsigned palign;
    unsigned halign;
    unsigned long base_align;
};
static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
                                          struct eg_surface *surf,
                                          const char *prefix)
{
    surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
    surf->base_align = surf->bpe;
    surf->palign = 1;
    surf->halign = 1;
    return 0;
}

static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
                                                  struct eg_surface *surf,
                                                  const char *prefix)
{
    struct evergreen_cs_track *track = p->track;
    unsigned palign;

    palign = MAX(64, track->group_size / surf->bpe);
    surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
    surf->base_align = track->group_size;
    surf->palign = palign;
    surf->halign = 1;
    if (surf->nbx & (palign - 1)) {
        if (prefix) {
            dev_warn(p->dev, "%s:%d %s pitch %d invalid, must be aligned with %d\n",
                     __func__, __LINE__, prefix, surf->nbx, palign);
        }
        return -EINVAL;
    }
    return 0;
}
static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
                                      struct eg_surface *surf,
                                      const char *prefix)
{
    struct evergreen_cs_track *track = p->track;
    unsigned palign;

    palign = track->group_size / (8 * surf->bpe * surf->nsamples);
    palign = MAX(8, palign);
    surf->layer_size = surf->nbx * surf->nby * surf->bpe;
    surf->base_align = track->group_size;
    surf->palign = palign;
    surf->halign = 8;
    if ((surf->nbx & (palign - 1))) {
        if (prefix) {
            dev_warn(p->dev, "%s:%d %s pitch %d invalid, must be aligned with %d (%d %d %d)\n",
                     __func__, __LINE__, prefix, surf->nbx, palign,
                     track->group_size, surf->bpe, surf->nsamples);
        }
        return -EINVAL;
    }
    if ((surf->nby & (8 - 1))) {
        if (prefix) {
            dev_warn(p->dev, "%s:%d %s height %d invalid, must be aligned with 8\n",
                     __func__, __LINE__, prefix, surf->nby);
        }
        return -EINVAL;
    }
    return 0;
}
static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
                                      struct eg_surface *surf,
                                      const char *prefix)
{
    struct evergreen_cs_track *track = p->track;
    unsigned palign, halign, tileb, slice_pt;
    unsigned mtile_pr, mtile_ps, mtileb;

    tileb = 64 * surf->bpe * surf->nsamples;
    slice_pt = 1;
    if (tileb > surf->tsplit) {
        slice_pt = tileb / surf->tsplit;
    }
    tileb = tileb / slice_pt;
    /* macro tile width & height */
    palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
    halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
    mtileb = (palign / 8) * (halign / 8) * tileb;
    mtile_pr = surf->nbx / palign;
    mtile_ps = (mtile_pr * surf->nby) / halign;
    surf->layer_size = mtile_ps * mtileb * slice_pt;
    surf->base_align = (palign / 8) * (halign / 8) * tileb;
    surf->palign = palign;
    surf->halign = halign;

    if ((surf->nbx & (palign - 1))) {
        if (prefix) {
            dev_warn(p->dev, "%s:%d %s pitch %d invalid, must be aligned with %d\n",
                     __func__, __LINE__, prefix, surf->nbx, palign);
        }
        return -EINVAL;
    }
    if ((surf->nby & (halign - 1))) {
        if (prefix) {
            dev_warn(p->dev, "%s:%d %s height %d invalid, must be aligned with %d\n",
                     __func__, __LINE__, prefix, surf->nby, halign);
        }
        return -EINVAL;
    }

    return 0;
}
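/*
 * Illustrative walk-through of the 2D tiling math above (the numbers are
 * made up for intuition, not taken from any particular asic): with bpe = 4,
 * nsamples = 1 and tsplit = 1024, one 8x8 tile holds tileb = 64 * 4 = 256
 * bytes, so slice_pt stays 1.  With npipes = 4, nbanks = 8,
 * bankw = bankh = 1 and mtilea = 2 the macro tile is
 * palign = (8 * 1 * 4) * 2 = 64 pixels wide and
 * halign = (8 * 1 * 8) / 2 = 32 pixels high, i.e.
 * mtileb = (64/8) * (32/8) * 256 = 8192 bytes.  A 1024x512 surface then has
 * 16 macro tiles per row, (16 * 512) / 32 = 256 per slice, and
 * layer_size = 256 * 8192 = 2MB, which matches 1024 * 512 * 4 exactly.
 */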
static int evergreen_surface_check(struct radeon_cs_parser *p,
                                   struct eg_surface *surf,
                                   const char *prefix)
{
    /* some common values computed here */
    surf->bpe = r600_fmt_get_blocksize(surf->format);

    switch (surf->mode) {
    case ARRAY_LINEAR_GENERAL:
        return evergreen_surface_check_linear(p, surf, prefix);
    case ARRAY_LINEAR_ALIGNED:
        return evergreen_surface_check_linear_aligned(p, surf, prefix);
    case ARRAY_1D_TILED_THIN1:
        return evergreen_surface_check_1d(p, surf, prefix);
    case ARRAY_2D_TILED_THIN1:
        return evergreen_surface_check_2d(p, surf, prefix);
    default:
        dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
                 __func__, __LINE__, prefix, surf->mode);
        return -EINVAL;
    }
}
static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
                                              struct eg_surface *surf,
                                              const char *prefix)
{
    switch (surf->mode) {
    case ARRAY_2D_TILED_THIN1:
        break;
    case ARRAY_LINEAR_GENERAL:
    case ARRAY_LINEAR_ALIGNED:
    case ARRAY_1D_TILED_THIN1:
        return 0;
    default:
        dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
                 __func__, __LINE__, prefix, surf->mode);
        return -EINVAL;
    }

    switch (surf->nbanks) {
    case 0: surf->nbanks = 2; break;
    case 1: surf->nbanks = 4; break;
    case 2: surf->nbanks = 8; break;
    case 3: surf->nbanks = 16; break;
    default:
        dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
                 __func__, __LINE__, prefix, surf->nbanks);
        return -EINVAL;
    }
    switch (surf->bankw) {
    case 0: surf->bankw = 1; break;
    case 1: surf->bankw = 2; break;
    case 2: surf->bankw = 4; break;
    case 3: surf->bankw = 8; break;
    default:
        dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
                 __func__, __LINE__, prefix, surf->bankw);
        return -EINVAL;
    }
    switch (surf->bankh) {
    case 0: surf->bankh = 1; break;
    case 1: surf->bankh = 2; break;
    case 2: surf->bankh = 4; break;
    case 3: surf->bankh = 8; break;
    default:
        dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
                 __func__, __LINE__, prefix, surf->bankh);
        return -EINVAL;
    }
    switch (surf->mtilea) {
    case 0: surf->mtilea = 1; break;
    case 1: surf->mtilea = 2; break;
    case 2: surf->mtilea = 4; break;
    case 3: surf->mtilea = 8; break;
    default:
        dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
                 __func__, __LINE__, prefix, surf->mtilea);
        return -EINVAL;
    }
    switch (surf->tsplit) {
    case 0: surf->tsplit = 64; break;
    case 1: surf->tsplit = 128; break;
    case 2: surf->tsplit = 256; break;
    case 3: surf->tsplit = 512; break;
    case 4: surf->tsplit = 1024; break;
    case 5: surf->tsplit = 2048; break;
    case 6: surf->tsplit = 4096; break;
    default:
        dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
                 __func__, __LINE__, prefix, surf->tsplit);
        return -EINVAL;
    }
    return 0;
}
static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
{
    struct evergreen_cs_track *track = p->track;
    struct eg_surface surf;
    unsigned pitch, slice, mslice;
    unsigned long offset;
    int r;

    mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
    pitch = track->cb_color_pitch[id];
    slice = track->cb_color_slice[id];
    surf.nbx = (pitch + 1) * 8;
    surf.nby = ((slice + 1) * 64) / surf.nbx;
    surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
    surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
    surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
    surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
    surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
    surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
    surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
    surf.nsamples = 1;

    if (!r600_fmt_is_valid_color(surf.format)) {
        dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
                 __func__, __LINE__, surf.format,
                 id, track->cb_color_info[id]);
        return -EINVAL;
    }

    r = evergreen_surface_value_conv_check(p, &surf, "cb");
    if (r) {
        return r;
    }

    r = evergreen_surface_check(p, &surf, "cb");
    if (r) {
        dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
                 __func__, __LINE__, id, track->cb_color_pitch[id],
                 track->cb_color_slice[id], track->cb_color_attrib[id],
                 track->cb_color_info[id]);
        return r;
    }

    offset = track->cb_color_bo_offset[id] << 8;
    if (offset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, id, offset, surf.base_align);
        return -EINVAL;
    }

    offset += surf.layer_size * mslice;
    if (offset > radeon_bo_size(track->cb_color_bo[id])) {
        /* old ddx is broken: it allocates the bo with w*h*bpp but
         * programs the slice with ALIGN(h, 8); catch this and patch
         * the command stream instead of setting the slice to
         * ALIGN(h, 8) ourselves.
         */
        volatile u32 *ib = p->ib.ptr;
        unsigned long tmp, nby, bsize, size, min = 0;

        /* find the height the ddx wants */
        if (surf.nby > 8) {
            min = surf.nby - 8;
        }
        bsize = radeon_bo_size(track->cb_color_bo[id]);
        tmp = track->cb_color_bo_offset[id] << 8;
        for (nby = surf.nby; nby > min; nby--) {
            size = nby * surf.nbx * surf.bpe * surf.nsamples;
            if ((tmp + size * mslice) <= bsize) {
                break;
            }
        }
        if (nby > min) {
            surf.nby = nby;
            slice = ((nby * surf.nbx) / 64) - 1;
            if (!evergreen_surface_check(p, &surf, "cb")) {
                /* check if this one works */
                tmp += surf.layer_size * mslice;
                if (tmp <= bsize) {
                    ib[track->cb_color_slice_idx[id]] = slice;
                    goto old_ddx_ok;
                }
            }
        }
        dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
                 "offset %d, max layer %d, bo size %ld, slice %d)\n",
                 __func__, __LINE__, id, surf.layer_size,
                 track->cb_color_bo_offset[id] << 8, mslice,
                 radeon_bo_size(track->cb_color_bo[id]), slice);
        dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
                 __func__, __LINE__, surf.nbx, surf.nby,
                 surf.mode, surf.bpe, surf.nsamples,
                 surf.bankw, surf.bankh,
                 surf.tsplit, surf.mtilea);
        return -EINVAL;
    }
old_ddx_ok:

    return 0;
}
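/*
 * Example of the old-ddx workaround above (hypothetical numbers): for a
 * 1024x601 32-bit target the ddx allocates 1024 * 601 * 4 bytes but programs
 * the slice register for ALIGN(601, 8) = 608 rows.  The loop walks nby down
 * from 608 until 1024 * nby * 4 fits in the bo (601 here), re-runs the
 * surface check with that height and, if it passes, patches the already
 * copied CB_COLORn_SLICE dword in the ib instead of rejecting the whole
 * command stream.
 */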
static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
                                             unsigned nbx, unsigned nby)
{
    struct evergreen_cs_track *track = p->track;
    unsigned long size;

    if (track->htile_bo == NULL) {
        dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
                 __func__, __LINE__, track->db_z_info);
        return -EINVAL;
    }

    if (G_028ABC_LINEAR(track->htile_surface)) {
        /* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
        nbx = roundup(nbx, 16 * 8);
        /* height is npipes htiles aligned == npipes * 8 pixel aligned */
        nby = roundup(nby, track->npipes * 8);
    } else {
        /* always assume 8x8 htile */
        /* alignment is htile alignment * 8; the htile alignment varies
         * with the number of pipes, the tile width and nby
         */
        switch (track->npipes) {
        case 8:
            /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
            nbx = roundup(nbx, 64 * 8);
            nby = roundup(nby, 64 * 8);
            break;
        case 4:
            /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
            nbx = roundup(nbx, 64 * 8);
            nby = roundup(nby, 32 * 8);
            break;
        case 2:
            /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
            nbx = roundup(nbx, 32 * 8);
            nby = roundup(nby, 32 * 8);
            break;
        case 1:
            /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
            nbx = roundup(nbx, 32 * 8);
            nby = roundup(nby, 16 * 8);
            break;
        default:
            dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
                     __func__, __LINE__, track->npipes);
            return -EINVAL;
        }
    }
    /* compute number of htiles */
    nbx = nbx >> 3;
    nby = nby >> 3;
    /* size must be aligned on npipes * 2K boundary */
    size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
    size += track->htile_offset;

    if (size > radeon_bo_size(track->htile_bo)) {
        dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
                 __func__, __LINE__, radeon_bo_size(track->htile_bo),
                 size, nbx, nby);
        return -EINVAL;
    }
    return 0;
}
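/*
 * Rough sizing example for the above (illustrative values): with npipes = 2
 * and a tiled 640x480 depth buffer, nbx rounds up to 768 and nby to 512
 * pixels, i.e. 96x64 htiles of 8x8 pixels each.  At 4 bytes per htile that
 * is 96 * 64 * 4 = 24576 bytes, already a multiple of the npipes * 2K
 * (4096 byte) boundary, so the bo must hold 24576 bytes past htile_offset.
 */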
static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
{
    struct evergreen_cs_track *track = p->track;
    struct eg_surface surf;
    unsigned pitch, slice, mslice;
    unsigned long offset;
    int r;

    mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
    pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
    slice = track->db_depth_slice;
    surf.nbx = (pitch + 1) * 8;
    surf.nby = ((slice + 1) * 64) / surf.nbx;
    surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
    surf.format = G_028044_FORMAT(track->db_s_info);
    surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
    surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
    surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
    surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
    surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
    surf.nsamples = 1;

    if (surf.format != 1) {
        dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
                 __func__, __LINE__, surf.format);
        return -EINVAL;
    }
    /* replace by a color format so we can use the same code */
    surf.format = V_028C70_COLOR_8;

    r = evergreen_surface_value_conv_check(p, &surf, "stencil");
    if (r) {
        return r;
    }

    r = evergreen_surface_check(p, &surf, NULL);
    if (r) {
        /* old userspace doesn't compute proper depth/stencil alignment;
         * check that alignment against a bigger bytes-per-element value
         * and only report an error if that alignment is wrong too.
         */
        surf.format = V_028C70_COLOR_8_8_8_8;
        r = evergreen_surface_check(p, &surf, "stencil");
        if (r) {
            dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
                     __func__, __LINE__, track->db_depth_size,
                     track->db_depth_slice, track->db_s_info, track->db_z_info);
            return r;
        }
        surf.format = V_028C70_COLOR_8;
    }

    offset = track->db_s_read_offset << 8;
    if (offset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, offset, surf.base_align);
        return -EINVAL;
    }
    offset += surf.layer_size * mslice;
    if (offset > radeon_bo_size(track->db_s_read_bo)) {
        dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
                 "offset %ld, max layer %d, bo size %ld)\n",
                 __func__, __LINE__, surf.layer_size,
                 (unsigned long)track->db_s_read_offset << 8, mslice,
                 radeon_bo_size(track->db_s_read_bo));
        dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
                 __func__, __LINE__, track->db_depth_size,
                 track->db_depth_slice, track->db_s_info, track->db_z_info);
        return -EINVAL;
    }

    offset = track->db_s_write_offset << 8;
    if (offset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, offset, surf.base_align);
        return -EINVAL;
    }
    offset += surf.layer_size * mslice;
    if (offset > radeon_bo_size(track->db_s_write_bo)) {
        dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
                 "offset %ld, max layer %d, bo size %ld)\n",
                 __func__, __LINE__, surf.layer_size,
                 (unsigned long)track->db_s_write_offset << 8, mslice,
                 radeon_bo_size(track->db_s_write_bo));
        return -EINVAL;
    }

    /* hyperz */
    if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
        r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
        if (r) {
            return r;
        }
    }

    return 0;
}
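/*
 * Why the second check with a bigger format (illustrative numbers): stencil
 * is 1 byte per element, so with e.g. a 256 byte group size a linear-aligned
 * pitch must be a multiple of MAX(64, 256/1) = 256 texels, while old
 * userspace aligned as if bpe were 4 (MAX(64, 256/4) = 64).  Re-running the
 * check as COLOR_8_8_8_8 only reports surfaces that even the relaxed 4-byte
 * alignment rules would reject.
 */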
static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
{
    struct evergreen_cs_track *track = p->track;
    struct eg_surface surf;
    unsigned pitch, slice, mslice;
    unsigned long offset;
    int r;

    mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
    pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
    slice = track->db_depth_slice;
    surf.nbx = (pitch + 1) * 8;
    surf.nby = ((slice + 1) * 64) / surf.nbx;
    surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
    surf.format = G_028040_FORMAT(track->db_z_info);
    surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
    surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
    surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
    surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
    surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
    surf.nsamples = 1;

    switch (surf.format) {
    case V_028040_Z_16:
        surf.format = V_028C70_COLOR_16;
        break;
    case V_028040_Z_24:
    case V_028040_Z_32_FLOAT:
        surf.format = V_028C70_COLOR_8_8_8_8;
        break;
    default:
        dev_warn(p->dev, "%s:%d depth invalid format %d\n",
                 __func__, __LINE__, surf.format);
        return -EINVAL;
    }

    r = evergreen_surface_value_conv_check(p, &surf, "depth");
    if (r) {
        dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
                 __func__, __LINE__, track->db_depth_size,
                 track->db_depth_slice, track->db_z_info);
        return r;
    }

    r = evergreen_surface_check(p, &surf, "depth");
    if (r) {
        dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
                 __func__, __LINE__, track->db_depth_size,
                 track->db_depth_slice, track->db_z_info);
        return r;
    }

    offset = track->db_z_read_offset << 8;
    if (offset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, offset, surf.base_align);
        return -EINVAL;
    }
    offset += surf.layer_size * mslice;
    if (offset > radeon_bo_size(track->db_z_read_bo)) {
        dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
                 "offset %ld, max layer %d, bo size %ld)\n",
                 __func__, __LINE__, surf.layer_size,
                 (unsigned long)track->db_z_read_offset << 8, mslice,
                 radeon_bo_size(track->db_z_read_bo));
        return -EINVAL;
    }

    offset = track->db_z_write_offset << 8;
    if (offset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, offset, surf.base_align);
        return -EINVAL;
    }
    offset += surf.layer_size * mslice;
    if (offset > radeon_bo_size(track->db_z_write_bo)) {
        dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
                 "offset %ld, max layer %d, bo size %ld)\n",
                 __func__, __LINE__, surf.layer_size,
                 (unsigned long)track->db_z_write_offset << 8, mslice,
                 radeon_bo_size(track->db_z_write_bo));
        return -EINVAL;
    }

    /* hyperz */
    if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
        r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
        if (r) {
            return r;
        }
    }

    return 0;
}
static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
                                               struct radeon_bo *texture,
                                               struct radeon_bo *mipmap,
                                               unsigned idx)
{
    struct eg_surface surf;
    unsigned long toffset, moffset;
    unsigned dim, llevel, mslice, width, height, depth, i;
    u32 texdw[8];
    int r;

    texdw[0] = radeon_get_ib_value(p, idx + 0);
    texdw[1] = radeon_get_ib_value(p, idx + 1);
    texdw[2] = radeon_get_ib_value(p, idx + 2);
    texdw[3] = radeon_get_ib_value(p, idx + 3);
    texdw[4] = radeon_get_ib_value(p, idx + 4);
    texdw[5] = radeon_get_ib_value(p, idx + 5);
    texdw[6] = radeon_get_ib_value(p, idx + 6);
    texdw[7] = radeon_get_ib_value(p, idx + 7);
    dim = G_030000_DIM(texdw[0]);
    llevel = G_030014_LAST_LEVEL(texdw[5]);
    mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
    width = G_030000_TEX_WIDTH(texdw[0]) + 1;
    height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
    depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
    surf.format = G_03001C_DATA_FORMAT(texdw[7]);
    surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
    surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
    surf.nby = r600_fmt_get_nblocksy(surf.format, height);
    surf.mode = G_030004_ARRAY_MODE(texdw[1]);
    surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
    surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
    surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
    surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
    surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
    surf.nsamples = 1;
    toffset = texdw[2] << 8;
    moffset = texdw[3] << 8;
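    /*
     * For reference, the fields decoded above live in the eight resource
     * dwords roughly as follows: dw0 dim/pitch/width, dw1 height/depth/
     * array mode, dw2 base address >> 8, dw3 mip address >> 8, dw5 last
     * level/last array, dw6 tile split, dw7 data format/num banks/bank
     * width/bank height/macro tile aspect.
     */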
    if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
        dev_warn(p->dev, "%s:%d texture invalid format %d\n",
                 __func__, __LINE__, surf.format);
        return -EINVAL;
    }
    switch (dim) {
    case V_030000_SQ_TEX_DIM_1D:
    case V_030000_SQ_TEX_DIM_2D:
    case V_030000_SQ_TEX_DIM_CUBEMAP:
    case V_030000_SQ_TEX_DIM_1D_ARRAY:
    case V_030000_SQ_TEX_DIM_2D_ARRAY:
        depth = 1;
        break;
    case V_030000_SQ_TEX_DIM_2D_MSAA:
    case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
        surf.nsamples = 1 << llevel;
        llevel = 0;
        depth = 1;
        break;
    case V_030000_SQ_TEX_DIM_3D:
        break;
    default:
        dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
                 __func__, __LINE__, dim);
        return -EINVAL;
    }

    r = evergreen_surface_value_conv_check(p, &surf, "texture");
    if (r) {
        return r;
    }

    /* align height */
    evergreen_surface_check(p, &surf, NULL);
    surf.nby = roundup(surf.nby, surf.halign);

    r = evergreen_surface_check(p, &surf, "texture");
    if (r) {
        dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
                 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
                 texdw[5], texdw[6], texdw[7]);
        return r;
    }

    /* check texture size */
    if (toffset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, toffset, surf.base_align);
        return -EINVAL;
    }
    if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, moffset, surf.base_align);
        return -EINVAL;
    }
    if (dim == SQ_TEX_DIM_3D) {
        toffset += surf.layer_size * depth;
    } else {
        toffset += surf.layer_size * mslice;
    }
    if (toffset > radeon_bo_size(texture)) {
        dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
                 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
                 __func__, __LINE__, surf.layer_size,
                 (unsigned long)texdw[2] << 8, mslice,
                 depth, radeon_bo_size(texture),
                 surf.nbx, surf.nby);
        return -EINVAL;
    }

    if (!mipmap) {
        if (llevel) {
            dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
                     __func__, __LINE__);
            return -EINVAL;
        } else {
            return 0; /* everything's ok */
        }
    }

    /* check mipmap size */
    for (i = 1; i <= llevel; i++) {
        unsigned w, h, d;

        w = r600_mip_minify(width, i);
        h = r600_mip_minify(height, i);
        d = r600_mip_minify(depth, i);
        surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
        surf.nby = r600_fmt_get_nblocksy(surf.format, h);

        switch (surf.mode) {
        case ARRAY_2D_TILED_THIN1:
            if (surf.nbx < surf.palign || surf.nby < surf.halign) {
                surf.mode = ARRAY_1D_TILED_THIN1;
            }
            /* recompute alignment */
            evergreen_surface_check(p, &surf, NULL);
            break;
        case ARRAY_LINEAR_GENERAL:
        case ARRAY_LINEAR_ALIGNED:
        case ARRAY_1D_TILED_THIN1:
            break;
        default:
            dev_warn(p->dev, "%s:%d invalid array mode %d\n",
                     __func__, __LINE__, surf.mode);
            return -EINVAL;
        }
        surf.nbx = roundup(surf.nbx, surf.palign);
        surf.nby = roundup(surf.nby, surf.halign);

        r = evergreen_surface_check(p, &surf, "mipmap");
        if (r) {
            return r;
        }

        if (dim == SQ_TEX_DIM_3D) {
            moffset += surf.layer_size * d;
        } else {
            moffset += surf.layer_size * mslice;
        }
        if (moffset > radeon_bo_size(mipmap)) {
            dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
                     "offset %ld, coffset %ld, max layer %d, depth %d, "
                     "bo size %ld) level0 (%d %d %d)\n",
                     __func__, __LINE__, i, surf.layer_size,
                     (unsigned long)texdw[3] << 8, moffset, mslice,
                     d, radeon_bo_size(mipmap),
                     width, height, depth);
            dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
                     __func__, __LINE__, surf.nbx, surf.nby,
                     surf.mode, surf.bpe, surf.nsamples,
                     surf.bankw, surf.bankh,
                     surf.tsplit, surf.mtilea);
            return -EINVAL;
        }
    }

    return 0;
}
static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
    struct evergreen_cs_track *track = p->track;
    unsigned tmp, i;
    int r;
    unsigned buffer_mask = 0;

    /* check streamout */
    if (track->streamout_dirty && track->vgt_strmout_config) {
        for (i = 0; i < 4; i++) {
            if (track->vgt_strmout_config & (1 << i)) {
                buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
            }
        }

        for (i = 0; i < 4; i++) {
            if (buffer_mask & (1 << i)) {
                if (track->vgt_strmout_bo[i]) {
                    u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
                                 (u64)track->vgt_strmout_size[i];
                    if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
                        DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n",
                                  i, (uintmax_t)offset,
                                  radeon_bo_size(track->vgt_strmout_bo[i]));
                        return -EINVAL;
                    }
                } else {
                    dev_warn(p->dev, "No buffer for streamout %d\n", i);
                    return -EINVAL;
                }
            }
        }
        track->streamout_dirty = false;
    }
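    /*
     * Example of the mask gathering above (illustrative values):
     * vgt_strmout_config bit i enables stream i, and nibble i of
     * vgt_strmout_buffer_config lists the buffers that stream writes.
     * E.g. config = 0x1 with buffer_config = 0x5 enables stream 0 writing
     * buffers 0 and 2, so only those two buffers need a correctly sized bo.
     */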
    if (track->sx_misc_kill_all_prims)
        return 0;

    /* check that we have a cb for each enabled target */
    if (track->cb_dirty) {
        tmp = track->cb_target_mask;
        for (i = 0; i < 8; i++) {
            if ((tmp >> (i * 4)) & 0xF) {
                /* at least one component is enabled */
                if (track->cb_color_bo[i] == NULL) {
                    dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
                             __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
                    return -EINVAL;
                }
                /* check cb */
                r = evergreen_cs_track_validate_cb(p, i);
                if (r) {
                    return r;
                }
            }
        }
        track->cb_dirty = false;
    }

    if (track->db_dirty) {
        /* Check stencil buffer */
        if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
            G_028800_STENCIL_ENABLE(track->db_depth_control)) {
            r = evergreen_cs_track_validate_stencil(p);
            if (r)
                return r;
        }
        /* Check depth buffer */
        if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
            G_028800_Z_ENABLE(track->db_depth_control)) {
            r = evergreen_cs_track_validate_depth(p);
            if (r)
                return r;
        }
        track->db_dirty = false;
    }

    return 0;
}
/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser: parser structure holding parsing context.
 *
 * This is an Evergreen(+)-specific function for parsing VLINE packets.
 * Real work is done by the r600_cs_common_vline_parse function.
 * Here we just set up the ASIC-specific register tables and call
 * the common implementation function.
 */
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
    static uint32_t vline_start_end[6] = {
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
    };
    static uint32_t vline_status[6] = {
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
    };

    return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}
static int evergreen_packet0_check(struct radeon_cs_parser *p,
                                   struct radeon_cs_packet *pkt,
                                   unsigned idx, unsigned reg)
{
    int r;

    switch (reg) {
    case EVERGREEN_VLINE_START_END:
        r = evergreen_cs_packet_parse_vline(p);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            return r;
        }
        break;
    default:
        DRM_ERROR("Forbidden register 0x%04X in cs at %d\n",
                  reg, idx);
        return -EINVAL;
    }
    return 0;
}

static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
                                      struct radeon_cs_packet *pkt)
{
    unsigned reg, i;
    unsigned idx;
    int r;

    idx = pkt->idx + 1;
    reg = pkt->reg;
    for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
        r = evergreen_packet0_check(p, pkt, idx, reg);
        if (r) {
            return r;
        }
    }
    return 0;
}
/**
 * evergreen_cs_check_reg() - check if register is authorized or not
 * @parser: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe this
 * function will test it against a list of registers needing special
 * handling.
 */
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
    struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
    struct radeon_cs_reloc *reloc;
    u32 last_reg;
    u32 m, i, tmp, *ib;
    int r;

    if (p->rdev->family >= CHIP_CAYMAN)
        last_reg = DRM_ARRAY_SIZE(cayman_reg_safe_bm);
    else
        last_reg = DRM_ARRAY_SIZE(evergreen_reg_safe_bm);

    i = (reg >> 7);
    if (i >= last_reg) {
        dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
        return -EINVAL;
    }
    m = 1 << ((reg >> 2) & 31);
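    /*
     * The safe-register bitmaps pack one bit per dword register: bit
     * (reg >> 2) & 31 of word reg >> 7, so each bitmap word covers 128
     * bytes of register space.  E.g. reg 0x28C70 lands in word 0x518,
     * bit 28; a clear bit means "safe, nothing further to check" below.
     */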
    if (p->rdev->family >= CHIP_CAYMAN) {
        if (!(cayman_reg_safe_bm[i] & m))
            return 0;
    } else {
        if (!(evergreen_reg_safe_bm[i] & m))
            return 0;
    }
    ib = p->ib.ptr;
    switch (reg) {
    /* force the following regs to 0 in an attempt to disable the out
     * buffer; we will need to understand better how it works before we
     * can perform a proper security check on it (Jerome)
     */
    case SQ_ESGS_RING_SIZE:
    case SQ_GSVS_RING_SIZE:
    case SQ_ESTMP_RING_SIZE:
    case SQ_GSTMP_RING_SIZE:
    case SQ_HSTMP_RING_SIZE:
    case SQ_LSTMP_RING_SIZE:
    case SQ_PSTMP_RING_SIZE:
    case SQ_VSTMP_RING_SIZE:
    case SQ_ESGS_RING_ITEMSIZE:
    case SQ_ESTMP_RING_ITEMSIZE:
    case SQ_GSTMP_RING_ITEMSIZE:
    case SQ_GSVS_RING_ITEMSIZE:
    case SQ_GS_VERT_ITEMSIZE:
    case SQ_GS_VERT_ITEMSIZE_1:
    case SQ_GS_VERT_ITEMSIZE_2:
    case SQ_GS_VERT_ITEMSIZE_3:
    case SQ_GSVS_RING_OFFSET_1:
    case SQ_GSVS_RING_OFFSET_2:
    case SQ_GSVS_RING_OFFSET_3:
    case SQ_HSTMP_RING_ITEMSIZE:
    case SQ_LSTMP_RING_ITEMSIZE:
    case SQ_PSTMP_RING_ITEMSIZE:
    case SQ_VSTMP_RING_ITEMSIZE:
    case VGT_TF_RING_SIZE:
        /* get value to populate the IB, don't remove */
        /* tmp = radeon_get_ib_value(p, idx);
           ib[idx] = 0; */
        break;
    case SQ_ESGS_RING_BASE:
    case SQ_GSVS_RING_BASE:
    case SQ_ESTMP_RING_BASE:
    case SQ_GSTMP_RING_BASE:
    case SQ_HSTMP_RING_BASE:
    case SQ_LSTMP_RING_BASE:
    case SQ_PSTMP_RING_BASE:
    case SQ_VSTMP_RING_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        break;
    case DB_DEPTH_CONTROL:
        track->db_depth_control = radeon_get_ib_value(p, idx);
        track->db_dirty = true;
        break;
    case CAYMAN_DB_EQAA:
        if (p->rdev->family < CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        break;
    case CAYMAN_DB_DEPTH_INFO:
        if (p->rdev->family < CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        break;
    case DB_Z_INFO:
        track->db_z_info = radeon_get_ib_value(p, idx);
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            r = radeon_cs_packet_next_reloc(p, &reloc, 0);
            if (r) {
                dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                return -EINVAL;
            }
            ib[idx] &= ~Z_ARRAY_MODE(0xf);
            track->db_z_info &= ~Z_ARRAY_MODE(0xf);
            ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
            track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
                unsigned bankw, bankh, mtaspect, tile_split;

                evergreen_tiling_fields(reloc->lobj.tiling_flags,
                                        &bankw, &bankh, &mtaspect,
                                        &tile_split);
                ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
                ib[idx] |= DB_TILE_SPLIT(tile_split) |
                           DB_BANK_WIDTH(bankw) |
                           DB_BANK_HEIGHT(bankh) |
                           DB_MACRO_TILE_ASPECT(mtaspect);
            }
        }
        track->db_dirty = true;
        break;
    case DB_STENCIL_INFO:
        track->db_s_info = radeon_get_ib_value(p, idx);
        track->db_dirty = true;
        break;
    case DB_DEPTH_VIEW:
        track->db_depth_view = radeon_get_ib_value(p, idx);
        track->db_dirty = true;
        break;
    case DB_DEPTH_SIZE:
        track->db_depth_size = radeon_get_ib_value(p, idx);
        track->db_dirty = true;
        break;
    case R_02805C_DB_DEPTH_SLICE:
        track->db_depth_slice = radeon_get_ib_value(p, idx);
        track->db_dirty = true;
        break;
    case DB_Z_READ_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_z_read_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_z_read_bo = reloc->robj;
        track->db_dirty = true;
        break;
    case DB_Z_WRITE_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_z_write_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_z_write_bo = reloc->robj;
        track->db_dirty = true;
        break;
    case DB_STENCIL_READ_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_s_read_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_s_read_bo = reloc->robj;
        track->db_dirty = true;
        break;
    case DB_STENCIL_WRITE_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_s_write_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_s_write_bo = reloc->robj;
        track->db_dirty = true;
        break;
    case VGT_STRMOUT_CONFIG:
        track->vgt_strmout_config = radeon_get_ib_value(p, idx);
        track->streamout_dirty = true;
        break;
    case VGT_STRMOUT_BUFFER_CONFIG:
        track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
        track->streamout_dirty = true;
        break;
    case VGT_STRMOUT_BUFFER_BASE_0:
    case VGT_STRMOUT_BUFFER_BASE_1:
    case VGT_STRMOUT_BUFFER_BASE_2:
    case VGT_STRMOUT_BUFFER_BASE_3:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
        track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->vgt_strmout_bo[tmp] = reloc->robj;
        track->streamout_dirty = true;
        break;
    case VGT_STRMOUT_BUFFER_SIZE_0:
    case VGT_STRMOUT_BUFFER_SIZE_1:
    case VGT_STRMOUT_BUFFER_SIZE_2:
    case VGT_STRMOUT_BUFFER_SIZE_3:
        tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
        /* size in register is DWs, convert to bytes */
        track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
        track->streamout_dirty = true;
        break;
    case CP_COHER_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "missing reloc for CP_COHER_BASE 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        /* fall through */
    case CB_TARGET_MASK:
        track->cb_target_mask = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case CB_SHADER_MASK:
        track->cb_shader_mask = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case PA_SC_AA_CONFIG:
        if (p->rdev->family >= CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
        track->nsamples = 1 << tmp;
        break;
    case CAYMAN_PA_SC_AA_CONFIG:
        if (p->rdev->family < CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
        track->nsamples = 1 << tmp;
        break;
    case CB_COLOR0_VIEW:
    case CB_COLOR1_VIEW:
    case CB_COLOR2_VIEW:
    case CB_COLOR3_VIEW:
    case CB_COLOR4_VIEW:
    case CB_COLOR5_VIEW:
    case CB_COLOR6_VIEW:
    case CB_COLOR7_VIEW:
        tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
        track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case CB_COLOR8_VIEW:
    case CB_COLOR9_VIEW:
    case CB_COLOR10_VIEW:
    case CB_COLOR11_VIEW:
        tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
        track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case CB_COLOR0_INFO:
    case CB_COLOR1_INFO:
    case CB_COLOR2_INFO:
    case CB_COLOR3_INFO:
    case CB_COLOR4_INFO:
    case CB_COLOR5_INFO:
    case CB_COLOR6_INFO:
    case CB_COLOR7_INFO:
        tmp = (reg - CB_COLOR0_INFO) / 0x3c;
        track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            r = radeon_cs_packet_next_reloc(p, &reloc, 0);
            if (r) {
                dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                return -EINVAL;
            }
            ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
            track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
        }
        track->cb_dirty = true;
        break;
    case CB_COLOR8_INFO:
    case CB_COLOR9_INFO:
    case CB_COLOR10_INFO:
    case CB_COLOR11_INFO:
        tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
        track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            r = radeon_cs_packet_next_reloc(p, &reloc, 0);
            if (r) {
                dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
                return -EINVAL;
            }
            ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
            track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
        }
        track->cb_dirty = true;
        break;
    case CB_COLOR0_PITCH:
    case CB_COLOR1_PITCH:
    case CB_COLOR2_PITCH:
    case CB_COLOR3_PITCH:
    case CB_COLOR4_PITCH:
    case CB_COLOR5_PITCH:
    case CB_COLOR6_PITCH:
    case CB_COLOR7_PITCH:
        tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
        track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case CB_COLOR8_PITCH:
    case CB_COLOR9_PITCH:
    case CB_COLOR10_PITCH:
    case CB_COLOR11_PITCH:
        tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
        track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case CB_COLOR0_SLICE:
    case CB_COLOR1_SLICE:
    case CB_COLOR2_SLICE:
    case CB_COLOR3_SLICE:
    case CB_COLOR4_SLICE:
    case CB_COLOR5_SLICE:
    case CB_COLOR6_SLICE:
    case CB_COLOR7_SLICE:
        tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
        track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
        track->cb_color_slice_idx[tmp] = idx;
        track->cb_dirty = true;
        break;
    case CB_COLOR8_SLICE:
    case CB_COLOR9_SLICE:
    case CB_COLOR10_SLICE:
    case CB_COLOR11_SLICE:
        tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
        track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
        track->cb_color_slice_idx[tmp] = idx;
        track->cb_dirty = true;
        break;
    case CB_COLOR0_ATTRIB:
    case CB_COLOR1_ATTRIB:
    case CB_COLOR2_ATTRIB:
    case CB_COLOR3_ATTRIB:
    case CB_COLOR4_ATTRIB:
    case CB_COLOR5_ATTRIB:
    case CB_COLOR6_ATTRIB:
    case CB_COLOR7_ATTRIB:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
                unsigned bankw, bankh, mtaspect, tile_split;

                evergreen_tiling_fields(reloc->lobj.tiling_flags,
                                        &bankw, &bankh, &mtaspect,
                                        &tile_split);
                ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
                ib[idx] |= CB_TILE_SPLIT(tile_split) |
                           CB_BANK_WIDTH(bankw) |
                           CB_BANK_HEIGHT(bankh) |
                           CB_MACRO_TILE_ASPECT(mtaspect);
            }
        }
        tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
        track->cb_color_attrib[tmp] = ib[idx];
        track->cb_dirty = true;
        break;
    case CB_COLOR8_ATTRIB:
    case CB_COLOR9_ATTRIB:
    case CB_COLOR10_ATTRIB:
    case CB_COLOR11_ATTRIB:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
                unsigned bankw, bankh, mtaspect, tile_split;

                evergreen_tiling_fields(reloc->lobj.tiling_flags,
                                        &bankw, &bankh, &mtaspect,
                                        &tile_split);
                ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
                ib[idx] |= CB_TILE_SPLIT(tile_split) |
                           CB_BANK_WIDTH(bankw) |
                           CB_BANK_HEIGHT(bankh) |
                           CB_MACRO_TILE_ASPECT(mtaspect);
            }
        }
        tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
        track->cb_color_attrib[tmp] = ib[idx];
        track->cb_dirty = true;
        break;
    case CB_COLOR0_FMASK:
    case CB_COLOR1_FMASK:
    case CB_COLOR2_FMASK:
    case CB_COLOR3_FMASK:
    case CB_COLOR4_FMASK:
    case CB_COLOR5_FMASK:
    case CB_COLOR6_FMASK:
    case CB_COLOR7_FMASK:
        tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_fmask_bo[tmp] = reloc->robj;
        break;
    case CB_COLOR0_CMASK:
    case CB_COLOR1_CMASK:
    case CB_COLOR2_CMASK:
    case CB_COLOR3_CMASK:
    case CB_COLOR4_CMASK:
    case CB_COLOR5_CMASK:
    case CB_COLOR6_CMASK:
    case CB_COLOR7_CMASK:
        tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_cmask_bo[tmp] = reloc->robj;
        break;
    case CB_COLOR0_FMASK_SLICE:
    case CB_COLOR1_FMASK_SLICE:
    case CB_COLOR2_FMASK_SLICE:
    case CB_COLOR3_FMASK_SLICE:
    case CB_COLOR4_FMASK_SLICE:
    case CB_COLOR5_FMASK_SLICE:
    case CB_COLOR6_FMASK_SLICE:
    case CB_COLOR7_FMASK_SLICE:
        tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
        track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
        break;
    case CB_COLOR0_CMASK_SLICE:
    case CB_COLOR1_CMASK_SLICE:
    case CB_COLOR2_CMASK_SLICE:
    case CB_COLOR3_CMASK_SLICE:
    case CB_COLOR4_CMASK_SLICE:
    case CB_COLOR5_CMASK_SLICE:
    case CB_COLOR6_CMASK_SLICE:
    case CB_COLOR7_CMASK_SLICE:
        tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
        track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
        break;
    case CB_COLOR0_BASE:
    case CB_COLOR1_BASE:
    case CB_COLOR2_BASE:
    case CB_COLOR3_BASE:
    case CB_COLOR4_BASE:
    case CB_COLOR5_BASE:
    case CB_COLOR6_BASE:
    case CB_COLOR7_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = (reg - CB_COLOR0_BASE) / 0x3c;
        track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_bo[tmp] = reloc->robj;
        track->cb_dirty = true;
        break;
    case CB_COLOR8_BASE:
    case CB_COLOR9_BASE:
    case CB_COLOR10_BASE:
    case CB_COLOR11_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
        track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_bo[tmp] = reloc->robj;
        track->cb_dirty = true;
        break;
    case DB_HTILE_DATA_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        track->htile_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->htile_bo = reloc->robj;
        track->db_dirty = true;
        break;
    case DB_HTILE_SURFACE:
        /* 8x8 only */
        track->htile_surface = radeon_get_ib_value(p, idx);
        /* force 8x8 htile width and height */
        ib[idx] |= 3;
        track->db_dirty = true;
        break;
    case CB_IMMED0_BASE:
    case CB_IMMED1_BASE:
    case CB_IMMED2_BASE:
    case CB_IMMED3_BASE:
    case CB_IMMED4_BASE:
    case CB_IMMED5_BASE:
    case CB_IMMED6_BASE:
    case CB_IMMED7_BASE:
    case CB_IMMED8_BASE:
    case CB_IMMED9_BASE:
    case CB_IMMED10_BASE:
    case CB_IMMED11_BASE:
    case SQ_PGM_START_FS:
    case SQ_PGM_START_ES:
    case SQ_PGM_START_VS:
    case SQ_PGM_START_GS:
    case SQ_PGM_START_PS:
    case SQ_PGM_START_HS:
    case SQ_PGM_START_LS:
    case SQ_CONST_MEM_BASE:
    case SQ_ALU_CONST_CACHE_GS_0:
    case SQ_ALU_CONST_CACHE_GS_1:
    case SQ_ALU_CONST_CACHE_GS_2:
    case SQ_ALU_CONST_CACHE_GS_3:
    case SQ_ALU_CONST_CACHE_GS_4:
    case SQ_ALU_CONST_CACHE_GS_5:
    case SQ_ALU_CONST_CACHE_GS_6:
    case SQ_ALU_CONST_CACHE_GS_7:
    case SQ_ALU_CONST_CACHE_GS_8:
    case SQ_ALU_CONST_CACHE_GS_9:
    case SQ_ALU_CONST_CACHE_GS_10:
    case SQ_ALU_CONST_CACHE_GS_11:
    case SQ_ALU_CONST_CACHE_GS_12:
    case SQ_ALU_CONST_CACHE_GS_13:
    case SQ_ALU_CONST_CACHE_GS_14:
    case SQ_ALU_CONST_CACHE_GS_15:
    case SQ_ALU_CONST_CACHE_PS_0:
    case SQ_ALU_CONST_CACHE_PS_1:
    case SQ_ALU_CONST_CACHE_PS_2:
    case SQ_ALU_CONST_CACHE_PS_3:
    case SQ_ALU_CONST_CACHE_PS_4:
    case SQ_ALU_CONST_CACHE_PS_5:
    case SQ_ALU_CONST_CACHE_PS_6:
    case SQ_ALU_CONST_CACHE_PS_7:
    case SQ_ALU_CONST_CACHE_PS_8:
    case SQ_ALU_CONST_CACHE_PS_9:
    case SQ_ALU_CONST_CACHE_PS_10:
    case SQ_ALU_CONST_CACHE_PS_11:
    case SQ_ALU_CONST_CACHE_PS_12:
    case SQ_ALU_CONST_CACHE_PS_13:
    case SQ_ALU_CONST_CACHE_PS_14:
    case SQ_ALU_CONST_CACHE_PS_15:
    case SQ_ALU_CONST_CACHE_VS_0:
    case SQ_ALU_CONST_CACHE_VS_1:
    case SQ_ALU_CONST_CACHE_VS_2:
    case SQ_ALU_CONST_CACHE_VS_3:
    case SQ_ALU_CONST_CACHE_VS_4:
    case SQ_ALU_CONST_CACHE_VS_5:
    case SQ_ALU_CONST_CACHE_VS_6:
    case SQ_ALU_CONST_CACHE_VS_7:
    case SQ_ALU_CONST_CACHE_VS_8:
    case SQ_ALU_CONST_CACHE_VS_9:
    case SQ_ALU_CONST_CACHE_VS_10:
    case SQ_ALU_CONST_CACHE_VS_11:
    case SQ_ALU_CONST_CACHE_VS_12:
    case SQ_ALU_CONST_CACHE_VS_13:
    case SQ_ALU_CONST_CACHE_VS_14:
    case SQ_ALU_CONST_CACHE_VS_15:
    case SQ_ALU_CONST_CACHE_HS_0:
    case SQ_ALU_CONST_CACHE_HS_1:
    case SQ_ALU_CONST_CACHE_HS_2:
    case SQ_ALU_CONST_CACHE_HS_3:
    case SQ_ALU_CONST_CACHE_HS_4:
    case SQ_ALU_CONST_CACHE_HS_5:
    case SQ_ALU_CONST_CACHE_HS_6:
    case SQ_ALU_CONST_CACHE_HS_7:
    case SQ_ALU_CONST_CACHE_HS_8:
    case SQ_ALU_CONST_CACHE_HS_9:
    case SQ_ALU_CONST_CACHE_HS_10:
    case SQ_ALU_CONST_CACHE_HS_11:
    case SQ_ALU_CONST_CACHE_HS_12:
    case SQ_ALU_CONST_CACHE_HS_13:
    case SQ_ALU_CONST_CACHE_HS_14:
    case SQ_ALU_CONST_CACHE_HS_15:
    case SQ_ALU_CONST_CACHE_LS_0:
    case SQ_ALU_CONST_CACHE_LS_1:
    case SQ_ALU_CONST_CACHE_LS_2:
    case SQ_ALU_CONST_CACHE_LS_3:
    case SQ_ALU_CONST_CACHE_LS_4:
    case SQ_ALU_CONST_CACHE_LS_5:
    case SQ_ALU_CONST_CACHE_LS_6:
    case SQ_ALU_CONST_CACHE_LS_7:
    case SQ_ALU_CONST_CACHE_LS_8:
    case SQ_ALU_CONST_CACHE_LS_9:
    case SQ_ALU_CONST_CACHE_LS_10:
    case SQ_ALU_CONST_CACHE_LS_11:
    case SQ_ALU_CONST_CACHE_LS_12:
    case SQ_ALU_CONST_CACHE_LS_13:
    case SQ_ALU_CONST_CACHE_LS_14:
    case SQ_ALU_CONST_CACHE_LS_15:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        break;
    case SX_MEMORY_EXPORT_BASE:
        if (p->rdev->family >= CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONFIG_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONFIG_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        break;
    case CAYMAN_SX_SCATTER_EXPORT_BASE:
        if (p->rdev->family < CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        break;
    case SX_MISC:
        track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
        break;
    default:
        dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
        return -EINVAL;
    }
    return 0;
}
static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
    u32 last_reg, m, i;

    if (p->rdev->family >= CHIP_CAYMAN)
        last_reg = DRM_ARRAY_SIZE(cayman_reg_safe_bm);
    else
        last_reg = DRM_ARRAY_SIZE(evergreen_reg_safe_bm);

    i = (reg >> 7);
    if (i >= last_reg) {
        dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
        return false;
    }
    m = 1 << ((reg >> 2) & 31);
    if (p->rdev->family >= CHIP_CAYMAN) {
        if (!(cayman_reg_safe_bm[i] & m))
            return true;
    } else {
        if (!(evergreen_reg_safe_bm[i] & m))
            return true;
    }
    dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
    return false;
}
static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                   struct radeon_cs_packet *pkt)
{
    struct radeon_cs_reloc *reloc;
    struct evergreen_cs_track *track;
    volatile u32 *ib;
    unsigned idx;
    unsigned i;
    unsigned start_reg, end_reg, reg;
    int r;
    u32 idx_value;

    track = (struct evergreen_cs_track *)p->track;
    ib = p->ib.ptr;
    idx = pkt->idx + 1;
    idx_value = radeon_get_ib_value(p, idx);

    switch (pkt->opcode) {
    case PACKET3_SET_PREDICATION:
    {
        int pred_op;
        int tmp;
        uint64_t offset;

        if (pkt->count != 1) {
            DRM_ERROR("bad SET PREDICATION\n");
            return -EINVAL;
        }

        tmp = radeon_get_ib_value(p, idx + 1);
        pred_op = (tmp >> 16) & 0x7;

        /* for the clear predicate operation */
        if (pred_op == 0)
            return 0;

        if (pred_op > 2) {
            DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
            return -EINVAL;
        }

        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad SET PREDICATION\n");
            return -EINVAL;
        }

        offset = reloc->lobj.gpu_offset +
                 (idx_value & 0xfffffff0) +
                 ((u64)(tmp & 0xff) << 32);

        ib[idx + 0] = offset;
        ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
    }
    break;
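    /*
     * Address reassembly, as done above: the packet carries a 40-bit GPU
     * address split as a 16-byte-aligned low dword (idx_value & 0xfffffff0)
     * plus the top 8 bits in the low byte of the next dword (tmp & 0xff).
     * The code adds the relocation offset to the combined 64-bit value and
     * splits it back into the same two fields.
     */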
    case PACKET3_CONTEXT_CONTROL:
        if (pkt->count != 1) {
            DRM_ERROR("bad CONTEXT_CONTROL\n");
            return -EINVAL;
        }
        break;
    case PACKET3_INDEX_TYPE:
    case PACKET3_NUM_INSTANCES:
    case PACKET3_CLEAR_STATE:
        if (pkt->count) {
            DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
            return -EINVAL;
        }
        break;
    case CAYMAN_PACKET3_DEALLOC_STATE:
        if (p->rdev->family < CHIP_CAYMAN) {
            DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
            return -EINVAL;
        }
        if (pkt->count) {
            DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
            return -EINVAL;
        }
        break;
    case PACKET3_INDEX_BASE:
    {
        uint64_t offset;

        if (pkt->count != 1) {
            DRM_ERROR("bad INDEX_BASE\n");
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad INDEX_BASE\n");
            return -EINVAL;
        }

        offset = reloc->lobj.gpu_offset +
                 idx_value +
                 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

        ib[idx+0] = offset;
        ib[idx+1] = upper_32_bits(offset) & 0xff;

        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    }
    case PACKET3_DRAW_INDEX:
    {
        uint64_t offset;

        if (pkt->count != 3) {
            DRM_ERROR("bad DRAW_INDEX\n");
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad DRAW_INDEX\n");
            return -EINVAL;
        }

        offset = reloc->lobj.gpu_offset +
                 idx_value +
                 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

        ib[idx+0] = offset;
        ib[idx+1] = upper_32_bits(offset) & 0xff;

        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    }
    case PACKET3_DRAW_INDEX_2:
    {
        uint64_t offset;

        if (pkt->count != 4) {
            DRM_ERROR("bad DRAW_INDEX_2\n");
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad DRAW_INDEX_2\n");
            return -EINVAL;
        }

        offset = reloc->lobj.gpu_offset +
                 radeon_get_ib_value(p, idx+1) +
                 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

        ib[idx+1] = offset;
        ib[idx+2] = upper_32_bits(offset) & 0xff;

        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    }
    case PACKET3_DRAW_INDEX_AUTO:
        if (pkt->count != 1) {
            DRM_ERROR("bad DRAW_INDEX_AUTO\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_MULTI_AUTO:
        if (pkt->count != 2) {
            DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_IMMD:
        if (pkt->count < 2) {
            DRM_ERROR("bad DRAW_INDEX_IMMD\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_OFFSET:
        if (pkt->count != 2) {
            DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_OFFSET_2:
        if (pkt->count != 3) {
            DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_DISPATCH_DIRECT:
        if (pkt->count != 3) {
            DRM_ERROR("bad DISPATCH_DIRECT\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
            return r;
        }
        break;
    case PACKET3_DISPATCH_INDIRECT:
        if (pkt->count != 1) {
            DRM_ERROR("bad DISPATCH_INDIRECT\n");
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad DISPATCH_INDIRECT\n");
            return -EINVAL;
        }
        ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_WAIT_REG_MEM:
        if (pkt->count != 5) {
            DRM_ERROR("bad WAIT_REG_MEM\n");
            return -EINVAL;
        }
        /* bit 4 is reg (0) or mem (1) */
        if (idx_value & 0x10) {
            uint64_t offset;

            r = radeon_cs_packet_next_reloc(p, &reloc, 0);
            if (r) {
                DRM_ERROR("bad WAIT_REG_MEM\n");
                return -EINVAL;
            }

            offset = reloc->lobj.gpu_offset +
                     (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
                     ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

            ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
            ib[idx+2] = upper_32_bits(offset) & 0xff;
        } else if (idx_value & 0x100) {
            DRM_ERROR("cannot use PFP on REG wait\n");
            return -EINVAL;
        }
        break;
    case PACKET3_CP_DMA:
    {
        u32 command, size, info;
        u64 offset, tmp;

        if (pkt->count != 4) {
            DRM_ERROR("bad CP DMA\n");
            return -EINVAL;
        }
        command = radeon_get_ib_value(p, idx+4);
        size = command & 0x1fffff;
        info = radeon_get_ib_value(p, idx+1);
        if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
            (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
            ((((info & 0x00300000) >> 20) == 0) &&
             (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
            ((((info & 0x60000000) >> 29) == 0) &&
             (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
            /* non mem-to-mem copies require a dw-aligned count */
            if (size % 4) {
                DRM_ERROR("CP DMA command requires dw count alignment\n");
                return -EINVAL;
            }
        }
        if (command & PACKET3_CP_DMA_CMD_SAS) {
            /* src address space is register */
            /* GDS is ok */
            if (((info & 0x60000000) >> 29) != 1) {
                DRM_ERROR("CP DMA SAS not supported\n");
                return -EINVAL;
            }
        } else {
            if (command & PACKET3_CP_DMA_CMD_SAIC) {
                DRM_ERROR("CP DMA SAIC only supported for registers\n");
                return -EINVAL;
            }
            /* src address space is memory */
            if (((info & 0x60000000) >> 29) == 0) {
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                if (r) {
                    DRM_ERROR("bad CP DMA SRC\n");
                    return -EINVAL;
                }

                tmp = radeon_get_ib_value(p, idx) +
                      ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

                offset = reloc->lobj.gpu_offset + tmp;

                if ((tmp + size) > radeon_bo_size(reloc->robj)) {
                    dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n",
                             (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
                    return -EINVAL;
                }

                ib[idx] = offset;
                ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
            } else if (((info & 0x60000000) >> 29) != 2) {
                DRM_ERROR("bad CP DMA SRC_SEL\n");
                return -EINVAL;
            }
        }
        if (command & PACKET3_CP_DMA_CMD_DAS) {
            /* dst address space is register */
            /* GDS is ok */
            if (((info & 0x00300000) >> 20) != 1) {
                DRM_ERROR("CP DMA DAS not supported\n");
                return -EINVAL;
            }
        } else {
            /* dst address space is memory */
            if (command & PACKET3_CP_DMA_CMD_DAIC) {
                DRM_ERROR("CP DMA DAIC only supported for registers\n");
                return -EINVAL;
            }
            if (((info & 0x00300000) >> 20) == 0) {
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                if (r) {
                    DRM_ERROR("bad CP DMA DST\n");
                    return -EINVAL;
                }

                tmp = radeon_get_ib_value(p, idx+2) +
                      ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);

                offset = reloc->lobj.gpu_offset + tmp;

                if ((tmp + size) > radeon_bo_size(reloc->robj)) {
                    dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n",
                             (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
                    return -EINVAL;
                }

                ib[idx+2] = offset;
                ib[idx+3] = upper_32_bits(offset) & 0xff;
            } else {
                DRM_ERROR("bad CP DMA DST_SEL\n");
                return -EINVAL;
            }
        }
        break;
    }
2164 case PACKET3_SURFACE_SYNC:
2165 if (pkt->count != 3) {
2166 DRM_ERROR("bad SURFACE_SYNC\n");
2169 /* 0xffffffff/0x0 is flush all cache flag */
2170 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
2171 radeon_get_ib_value(p, idx + 2) != 0) {
2172 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2174 DRM_ERROR("bad SURFACE_SYNC\n");
2177 ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2180 case PACKET3_EVENT_WRITE:
2181 if (pkt->count != 2 && pkt->count != 0) {
2182 DRM_ERROR("bad EVENT_WRITE\n");
2188 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2190 DRM_ERROR("bad EVENT_WRITE\n");
2193 offset = reloc->lobj.gpu_offset +
2194 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
2195 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2197 ib[idx+1] = offset & 0xfffffff8;
2198 ib[idx+2] = upper_32_bits(offset) & 0xff;
2201 case PACKET3_EVENT_WRITE_EOP:
2205 if (pkt->count != 4) {
2206 DRM_ERROR("bad EVENT_WRITE_EOP\n");
2209 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2211 DRM_ERROR("bad EVENT_WRITE_EOP\n");
2215 offset = reloc->lobj.gpu_offset +
2216 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2217 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2219 ib[idx+1] = offset & 0xfffffffc;
2220 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2223 case PACKET3_EVENT_WRITE_EOS:
2227 if (pkt->count != 3) {
2228 DRM_ERROR("bad EVENT_WRITE_EOS\n");
2231 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2233 DRM_ERROR("bad EVENT_WRITE_EOS\n");
2237 offset = reloc->lobj.gpu_offset +
2238 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2239 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2241 ib[idx+1] = offset & 0xfffffffc;
2242 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
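/*
 * EVENT_WRITE, EVENT_WRITE_EOP and EVENT_WRITE_EOS share the same
 * address encoding: dw1 holds the low dword of the GPU address (masked
 * above to 8-byte alignment for EVENT_WRITE, 4-byte for EOP/EOS) and
 * the low byte of dw2 holds address bits 39..32; those are the only
 * dwords that need relocation.
 */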
2245 case PACKET3_SET_CONFIG_REG:
2246 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
2247 end_reg = 4 * pkt->count + start_reg - 4;
2248 if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
2249 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
2250 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
2251 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
2254 for (i = 0; i < pkt->count; i++) {
2255 reg = start_reg + (4 * i);
2256 r = evergreen_cs_check_reg(p, reg, idx+1+i);
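/*
 * Worked example of the range check above (packet values hypothetical):
 * with dw0 = 0x80 and count = 3, start_reg = (0x80 << 2) +
 * PACKET3_SET_CONFIG_REG_START and end_reg = start_reg + 4 * (3 - 1),
 * i.e. three consecutive register dwords; both ends must fall inside
 * [SET_CONFIG_REG_START, SET_CONFIG_REG_END) before each register is
 * handed to evergreen_cs_check_reg(). The CONTEXT/RESOURCE/BOOL/LOOP/
 * CTL/SAMPLER cases below repeat the same pattern over their ranges.
 */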
2261 case PACKET3_SET_CONTEXT_REG:
2262 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
2263 end_reg = 4 * pkt->count + start_reg - 4;
2264 if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
2265 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
2266 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
2267 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
2270 for (i = 0; i < pkt->count; i++) {
2271 reg = start_reg + (4 * i);
2272 r = evergreen_cs_check_reg(p, reg, idx+1+i);
2277 case PACKET3_SET_RESOURCE:
2278 if (pkt->count % 8) {
2279 DRM_ERROR("bad SET_RESOURCE\n");
2282 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
2283 end_reg = 4 * pkt->count + start_reg - 4;
2284 if ((start_reg < PACKET3_SET_RESOURCE_START) ||
2285 (start_reg >= PACKET3_SET_RESOURCE_END) ||
2286 (end_reg >= PACKET3_SET_RESOURCE_END)) {
2287 DRM_ERROR("bad SET_RESOURCE\n");
2290 for (i = 0; i < (pkt->count / 8); i++) {
2291 struct radeon_bo *texture, *mipmap;
2292 u32 toffset, moffset;
2293 u32 size, offset, mip_address, tex_dim;
2295 switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
2296 case SQ_TEX_VTX_VALID_TEXTURE:
2298 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2300 DRM_ERROR("bad SET_RESOURCE (tex)\n");
2303 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
2304 ib[idx+1+(i*8)+1] |=
2305 TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
2306 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
2307 unsigned bankw, bankh, mtaspect, tile_split;
2309 evergreen_tiling_fields(reloc->lobj.tiling_flags,
2310 &bankw, &bankh, &mtaspect,
2312 ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
2313 ib[idx+1+(i*8)+7] |=
2314 TEX_BANK_WIDTH(bankw) |
2315 TEX_BANK_HEIGHT(bankh) |
2316 MACRO_TILE_ASPECT(mtaspect) |
2317 TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
2320 texture = reloc->robj;
2321 toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2324 tex_dim = ib[idx+1+(i*8)+0] & 0x7;
2325 mip_address = ib[idx+1+(i*8)+3];
2327 if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
2328 !mip_address &&
2329 !radeon_cs_packet_next_is_pkt3_nop(p)) {
2330 /* MIP_ADDRESS should point to FMASK for an MSAA texture.
2331 * It should be 0 if FMASK is disabled. */
2335 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2337 DRM_ERROR("bad SET_RESOURCE (tex)\n");
2340 moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2341 mipmap = reloc->robj;
2344 r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
2347 ib[idx+1+(i*8)+2] += toffset;
2348 ib[idx+1+(i*8)+3] += moffset;
2350 case SQ_TEX_VTX_VALID_BUFFER:
2354 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2356 DRM_ERROR("bad SET_RESOURCE (vtx)\n");
2359 offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
2360 size = radeon_get_ib_value(p, idx+1+(i*8)+1);
2361 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
2362 /* force size to size of the buffer */
2363 dev_warn(p->dev, "vbo resource seems too big for the bo\n");
2364 ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
2367 offset64 = reloc->lobj.gpu_offset + offset;
2368 ib[idx+1+(i*8)+0] = offset64;
2369 ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
2370 (upper_32_bits(offset64) & 0xff);
2373 case SQ_TEX_VTX_INVALID_TEXTURE:
2374 case SQ_TEX_VTX_INVALID_BUFFER:
2376 DRM_ERROR("bad SET_RESOURCE\n");
2381 case PACKET3_SET_ALU_CONST:
2382 /* XXX fix me ALU const buffers only */
2384 case PACKET3_SET_BOOL_CONST:
2385 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
2386 end_reg = 4 * pkt->count + start_reg - 4;
2387 if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
2388 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
2389 (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
2390 DRM_ERROR("bad SET_BOOL_CONST\n");
2394 case PACKET3_SET_LOOP_CONST:
2395 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
2396 end_reg = 4 * pkt->count + start_reg - 4;
2397 if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
2398 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
2399 (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
2400 DRM_ERROR("bad SET_LOOP_CONST\n");
2404 case PACKET3_SET_CTL_CONST:
2405 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
2406 end_reg = 4 * pkt->count + start_reg - 4;
2407 if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
2408 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
2409 (end_reg >= PACKET3_SET_CTL_CONST_END)) {
2410 DRM_ERROR("bad SET_CTL_CONST\n");
2414 case PACKET3_SET_SAMPLER:
2415 if (pkt->count % 3) {
2416 DRM_ERROR("bad SET_SAMPLER\n");
2419 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
2420 end_reg = 4 * pkt->count + start_reg - 4;
2421 if ((start_reg < PACKET3_SET_SAMPLER_START) ||
2422 (start_reg >= PACKET3_SET_SAMPLER_END) ||
2423 (end_reg >= PACKET3_SET_SAMPLER_END)) {
2424 DRM_ERROR("bad SET_SAMPLER\n");
2428 case PACKET3_STRMOUT_BUFFER_UPDATE:
2429 if (pkt->count != 4) {
2430 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
2433 /* Updating memory at DST_ADDRESS. */
2434 if (idx_value & 0x1) {
2436 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2438 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
2441 offset = radeon_get_ib_value(p, idx+1);
2442 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2443 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2444 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n",
2445 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2448 offset += reloc->lobj.gpu_offset;
2449 ib[idx+1] = offset;
2450 ib[idx+2] = upper_32_bits(offset) & 0xff;
2452 /* Reading data from SRC_ADDRESS. */
2453 if (((idx_value >> 1) & 0x3) == 2) {
2455 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2457 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
2460 offset = radeon_get_ib_value(p, idx+3);
2461 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2462 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2463 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n",
2464 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2467 offset += reloc->lobj.gpu_offset;
2468 ib[idx+3] = offset;
2469 ib[idx+4] = upper_32_bits(offset) & 0xff;
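/*
 * STRMOUT_BUFFER_UPDATE body sketch (derived from the checks above):
 * dw0 is the control word (bit 0: write the filled size to
 * DST_ADDRESS; bits 2..1: source select, where 2 reads the size from
 * SRC_ADDRESS), dw1/dw2 are the destination address low/high dwords
 * and dw3/dw4 the source address pair, each high dword carrying
 * address bits 39..32 in its low byte.
 */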
2472 case PACKET3_MEM_WRITE:
2476 if (pkt->count != 3) {
2477 DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2480 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2482 DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2485 offset = radeon_get_ib_value(p, idx+0);
2486 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2487 if (offset & 0x7) {
2488 DRM_ERROR("bad MEM_WRITE (address not qword aligned)\n");
2491 if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2492 DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n",
2493 (uintmax_t)offset + 8, radeon_bo_size(reloc->robj));
2496 offset += reloc->lobj.gpu_offset;
2497 ib[idx+0] = offset;
2498 ib[idx+1] = upper_32_bits(offset) & 0xff;
2501 case PACKET3_COPY_DW:
2502 if (pkt->count != 4) {
2503 DRM_ERROR("bad COPY_DW (invalid count)\n");
2506 if (idx_value & 0x1) {
2508 /* SRC is memory. */
2509 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2511 DRM_ERROR("bad COPY_DW (missing src reloc)\n");
2514 offset = radeon_get_ib_value(p, idx+1);
2515 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2516 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2517 DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n",
2518 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2521 offset += reloc->lobj.gpu_offset;
2522 ib[idx+1] = offset;
2523 ib[idx+2] = upper_32_bits(offset) & 0xff;
2524 } else {
2525 /* SRC is a reg. */
2526 reg = radeon_get_ib_value(p, idx+1) << 2;
2527 if (!evergreen_is_safe_reg(p, reg, idx+1))
2528 return -EINVAL;
2529 }
2530 if (idx_value & 0x2) {
2532 /* DST is memory. */
2533 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2535 DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
2538 offset = radeon_get_ib_value(p, idx+3);
2539 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2540 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2541 DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n",
2542 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2545 offset += reloc->lobj.gpu_offset;
2546 ib[idx+3] = offset;
2547 ib[idx+4] = upper_32_bits(offset) & 0xff;
2548 } else {
2549 /* DST is a reg. */
2550 reg = radeon_get_ib_value(p, idx+3) << 2;
2551 if (!evergreen_is_safe_reg(p, reg, idx+3))
2552 return -EINVAL;
2553 }
2558 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2564 int evergreen_cs_parse(struct radeon_cs_parser *p)
2566 struct radeon_cs_packet pkt;
2567 struct evergreen_cs_track *track;
2571 if (p->track == NULL) {
2572 /* initialize tracker, we are in kms */
2573 track = kmalloc(sizeof(*track), M_DRM,
2574 M_ZERO | M_WAITOK);
2575 if (track == NULL)
2576 return -ENOMEM;
2577 evergreen_cs_track_init(track);
2578 if (p->rdev->family >= CHIP_CAYMAN)
2579 tmp = p->rdev->config.cayman.tile_config;
2581 tmp = p->rdev->config.evergreen.tile_config;
2583 switch (tmp & 0xf) {
2599 switch ((tmp & 0xf0) >> 4) {
2612 switch ((tmp & 0xf00) >> 8) {
2614 track->group_size = 256;
2618 track->group_size = 512;
2622 switch ((tmp & 0xf000) >> 12) {
2624 track->row_size = 1;
2628 track->row_size = 2;
2631 track->row_size = 4;
2638 r = radeon_cs_packet_parse(p, &pkt, p->idx);
2640 drm_free(p->track, M_DRM);
2644 p->idx += pkt.count + 2;
2646 case RADEON_PACKET_TYPE0:
2647 r = evergreen_cs_parse_packet0(p, &pkt);
2649 case RADEON_PACKET_TYPE2:
2651 case RADEON_PACKET_TYPE3:
2652 r = evergreen_packet3_check(p, &pkt);
2655 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
2656 drm_free(p->track, M_DRM);
2661 drm_free(p->track, M_DRM);
2665 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2667 for (r = 0; r < p->ib.length_dw; r++) {
2668 DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]);
2672 drm_free(p->track, M_DRM);
2678 * evergreen_dma_cs_parse() - parse the DMA IB
2679 * @p: parser structure holding parsing context.
2681 * Parses the DMA IB from the CS ioctl, updates the GPU
2682 * addresses based on the reloc information, and checks
2683 * for errors. (Evergreen-Cayman)
2684 * Returns 0 for success and an error on failure.
2686 int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2688 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2689 struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
2690 u32 header, cmd, count, sub_cmd;
2691 volatile u32 *ib = p->ib.ptr;
2693 u64 src_offset, dst_offset, dst2_offset;
2697 if (p->idx >= ib_chunk->length_dw) {
2698 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2699 p->idx, ib_chunk->length_dw);
2703 header = radeon_get_ib_value(p, idx);
2704 cmd = GET_DMA_CMD(header);
2705 count = GET_DMA_COUNT(header);
2706 sub_cmd = GET_DMA_SUB_CMD(header);
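/*
 * DMA packet header layout, per the GET_DMA_* macros in evergreend.h:
 * the command lives in bits 31..28, the sub-command in bits 27..20 and
 * the count in bits 19..0. In the relocation code below, tiled base
 * addresses are programmed in 256-byte units (hence the >> 8 shifts),
 * while linear addresses use a full low dword plus an 8-bit high dword.
 */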
2709 case DMA_PACKET_WRITE:
2710 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2712 DRM_ERROR("bad DMA_PACKET_WRITE\n");
2718 dst_offset = ib[idx+1];
2719 dst_offset <<= 8;
2721 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2722 p->idx += count + 7;
2726 dst_offset = ib[idx+1];
2727 dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
2729 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2730 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2731 p->idx += count + 3;
2734 DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib[idx+0]);
2737 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2738 dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n",
2739 (uintmax_t)dst_offset, radeon_bo_size(dst_reloc->robj));
2743 case DMA_PACKET_COPY:
2744 r = r600_dma_cs_next_reloc(p, &src_reloc);
2746 DRM_ERROR("bad DMA_PACKET_COPY\n");
2749 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2751 DRM_ERROR("bad DMA_PACKET_COPY\n");
2755 /* Copy L2L, DW aligned */
2758 src_offset = ib[idx+2];
2759 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
2760 dst_offset = ib[idx+1];
2761 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
2762 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2763 dev_warn(p->dev, "DMA L2L, dw src buffer too small (%ju %lu)\n",
2764 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2767 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2768 dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%ju %lu)\n",
2769 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2772 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2773 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2774 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2775 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2781 if (ib[idx + 2] & (1 << 31)) {
2782 /* tiled src, linear dst */
2783 src_offset = ib[idx+1];
2784 src_offset <<= 8;
2785 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2787 dst_offset = radeon_get_ib_value(p, idx + 7);
2788 dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
2789 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2790 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2792 /* linear src, tiled dst */
2793 src_offset = ib[idx+7];
2794 src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
2795 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2796 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2798 dst_offset = ib[idx+1];
2799 dst_offset <<= 8;
2800 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2802 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2803 dev_warn(p->dev, "DMA L2T, src buffer too small (%ju %lu)\n",
2804 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2807 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2808 dev_warn(p->dev, "DMA L2T, dst buffer too small (%ju %lu)\n",
2809 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2814 /* Copy L2L, byte aligned */
2817 src_offset = ib[idx+2];
2818 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
2819 dst_offset = ib[idx+1];
2820 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
2821 if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
2822 dev_warn(p->dev, "DMA L2L, byte src buffer too small (%ju %lu)\n",
2823 (uintmax_t)src_offset + count, radeon_bo_size(src_reloc->robj));
2826 if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
2827 dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%ju %lu)\n",
2828 (uintmax_t)dst_offset + count, radeon_bo_size(dst_reloc->robj));
2831 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
2832 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
2833 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2834 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2837 /* Copy L2L, partial */
2840 if (p->family < CHIP_CAYMAN) {
2841 DRM_ERROR("L2L Partial is cayman only !\n");
2844 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
2845 ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2846 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
2847 ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2851 /* Copy L2L, DW aligned, broadcast */
2853 /* L2L, dw, broadcast */
2854 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
2856 DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
2859 dst_offset = ib[idx+1];
2860 dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
2861 dst2_offset = ib[idx+2];
2862 dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
2863 src_offset = ib[idx+3];
2864 src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
2865 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2866 dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%ju %lu)\n",
2867 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2870 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2871 dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%ju %lu)\n",
2872 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2875 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
2876 dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%ju %lu)\n",
2877 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2880 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2881 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
2882 ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2883 ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2884 ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
2885 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2888 /* Copy L2T Frame to Field */
2890 if (ib[idx + 2] & (1 << 31)) {
2891 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
2894 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
2896 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
2899 dst_offset = ib[idx+1];
2900 dst_offset <<= 8;
2901 dst2_offset = ib[idx+2];
2902 dst2_offset <<= 8;
2903 src_offset = ib[idx+8];
2904 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
2905 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2906 dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%ju %lu)\n",
2907 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2910 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2911 dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%ju %lu)\n",
2912 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2915 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
2916 dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%ju %lu)\n",
2917 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2920 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2921 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
2922 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2923 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2926 /* Copy L2T/T2L, partial */
2928 /* L2T, T2L partial */
2929 if (p->family < CHIP_CAYMAN) {
2930 DRM_ERROR("L2T, T2L Partial is cayman only !\n");
2934 if (ib[idx + 2] & (1 << 31)) {
2935 /* tiled src, linear dst */
2936 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2938 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2939 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2941 /* linear src, tiled dst */
2942 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2943 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2945 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2949 /* Copy L2T broadcast */
2951 /* L2T, broadcast */
2952 if (ib[idx + 2] & (1 << 31)) {
2953 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
2956 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
2958 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
2961 dst_offset = ib[idx+1];
2962 dst_offset <<= 8;
2963 dst2_offset = ib[idx+2];
2964 dst2_offset <<= 8;
2965 src_offset = ib[idx+8];
2966 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
2967 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2968 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n",
2969 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2972 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2973 dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%ju %lu)\n",
2974 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2977 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
2978 dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%ju %lu)\n",
2979 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2982 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2983 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
2984 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2985 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2988 /* Copy L2T/T2L (tile units) */
2992 if (ib[idx + 2] & (1 << 31)) {
2993 /* tiled src, linear dst */
2994 src_offset = ib[idx+1];
2995 src_offset <<= 8;
2996 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2998 dst_offset = ib[idx+7];
2999 dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
3000 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3001 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3003 /* linear src, tiled dst */
3004 src_offset = ib[idx+7];
3005 src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
3006 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3007 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3009 dst_offset = ib[idx+1];
3010 dst_offset <<= 8;
3011 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3013 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3014 dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%ju %lu)\n",
3015 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3018 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3019 dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%ju %lu)\n",
3020 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3025 /* Copy T2T, partial (tile units) */
3028 if (p->family < CHIP_CAYMAN) {
3029 DRM_ERROR("L2T, T2L Partial is cayman only !\n");
3032 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3033 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3036 /* Copy L2T broadcast (tile units) */
3038 /* L2T, broadcast */
3039 if (ib[idx + 2] & (1 << 31)) {
3040 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3043 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
3045 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3048 dst_offset = ib[idx+1];
3049 dst_offset <<= 8;
3050 dst2_offset = ib[idx+2];
3051 dst2_offset <<= 8;
3052 src_offset = ib[idx+8];
3053 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
3054 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3055 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n",
3056 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3059 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3060 dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%ju %lu)\n",
3061 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3064 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
3065 dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%ju %lu)\n",
3066 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3069 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3070 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
3071 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3072 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3076 DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib[idx+0]);
3080 case DMA_PACKET_CONSTANT_FILL:
3081 r = r600_dma_cs_next_reloc(p, &dst_reloc);
3083 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
3086 dst_offset = radeon_get_ib_value(p, idx+1);
3087 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
3088 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3089 dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n",
3090 (uintmax_t)dst_offset, radeon_bo_size(dst_reloc->robj));
3093 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3094 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
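/*
 * Note on the encoding above: CONSTANT_FILL packs bits 39..32 of the
 * destination address into bits 23..16 of dw3, which is why both the
 * decode and the relocation go through the 0x00ff0000 field instead of
 * a plain high dword.
 */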
3097 case DMA_PACKET_NOP:
3101 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
3104 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
3106 for (r = 0; r < p->ib.length_dw; r++) {
3107 DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]);
3115 static bool evergreen_vm_reg_valid(u32 reg)
3117 /* context regs are fine */
3121 /* check config regs */
3124 case GRBM_GFX_INDEX:
3125 case CP_STRMOUT_CNTL:
3128 case VGT_VTX_VECT_EJECT_REG:
3129 case VGT_CACHE_INVALIDATION:
3130 case VGT_GS_VERTEX_REUSE:
3131 case VGT_PRIMITIVE_TYPE:
3132 case VGT_INDEX_TYPE:
3133 case VGT_NUM_INDICES:
3134 case VGT_NUM_INSTANCES:
3135 case VGT_COMPUTE_DIM_X:
3136 case VGT_COMPUTE_DIM_Y:
3137 case VGT_COMPUTE_DIM_Z:
3138 case VGT_COMPUTE_START_X:
3139 case VGT_COMPUTE_START_Y:
3140 case VGT_COMPUTE_START_Z:
3141 case VGT_COMPUTE_INDEX:
3142 case VGT_COMPUTE_THREAD_GROUP_SIZE:
3143 case VGT_HS_OFFCHIP_PARAM:
3145 case PA_SU_LINE_STIPPLE_VALUE:
3146 case PA_SC_LINE_STIPPLE_STATE:
3148 case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
3149 case SQ_DYN_GPR_SIMD_LOCK_EN:
3151 case SQ_GPR_RESOURCE_MGMT_1:
3152 case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
3153 case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
3154 case SQ_CONST_MEM_BASE:
3155 case SQ_STATIC_THREAD_MGMT_1:
3156 case SQ_STATIC_THREAD_MGMT_2:
3157 case SQ_STATIC_THREAD_MGMT_3:
3158 case SPI_CONFIG_CNTL:
3159 case SPI_CONFIG_CNTL_1:
3166 case TD_PS_BORDER_COLOR_INDEX:
3167 case TD_PS_BORDER_COLOR_RED:
3168 case TD_PS_BORDER_COLOR_GREEN:
3169 case TD_PS_BORDER_COLOR_BLUE:
3170 case TD_PS_BORDER_COLOR_ALPHA:
3171 case TD_VS_BORDER_COLOR_INDEX:
3172 case TD_VS_BORDER_COLOR_RED:
3173 case TD_VS_BORDER_COLOR_GREEN:
3174 case TD_VS_BORDER_COLOR_BLUE:
3175 case TD_VS_BORDER_COLOR_ALPHA:
3176 case TD_GS_BORDER_COLOR_INDEX:
3177 case TD_GS_BORDER_COLOR_RED:
3178 case TD_GS_BORDER_COLOR_GREEN:
3179 case TD_GS_BORDER_COLOR_BLUE:
3180 case TD_GS_BORDER_COLOR_ALPHA:
3181 case TD_HS_BORDER_COLOR_INDEX:
3182 case TD_HS_BORDER_COLOR_RED:
3183 case TD_HS_BORDER_COLOR_GREEN:
3184 case TD_HS_BORDER_COLOR_BLUE:
3185 case TD_HS_BORDER_COLOR_ALPHA:
3186 case TD_LS_BORDER_COLOR_INDEX:
3187 case TD_LS_BORDER_COLOR_RED:
3188 case TD_LS_BORDER_COLOR_GREEN:
3189 case TD_LS_BORDER_COLOR_BLUE:
3190 case TD_LS_BORDER_COLOR_ALPHA:
3191 case TD_CS_BORDER_COLOR_INDEX:
3192 case TD_CS_BORDER_COLOR_RED:
3193 case TD_CS_BORDER_COLOR_GREEN:
3194 case TD_CS_BORDER_COLOR_BLUE:
3195 case TD_CS_BORDER_COLOR_ALPHA:
3196 case SQ_ESGS_RING_SIZE:
3197 case SQ_GSVS_RING_SIZE:
3198 case SQ_ESTMP_RING_SIZE:
3199 case SQ_GSTMP_RING_SIZE:
3200 case SQ_HSTMP_RING_SIZE:
3201 case SQ_LSTMP_RING_SIZE:
3202 case SQ_PSTMP_RING_SIZE:
3203 case SQ_VSTMP_RING_SIZE:
3204 case SQ_ESGS_RING_ITEMSIZE:
3205 case SQ_ESTMP_RING_ITEMSIZE:
3206 case SQ_GSTMP_RING_ITEMSIZE:
3207 case SQ_GSVS_RING_ITEMSIZE:
3208 case SQ_GS_VERT_ITEMSIZE:
3209 case SQ_GS_VERT_ITEMSIZE_1:
3210 case SQ_GS_VERT_ITEMSIZE_2:
3211 case SQ_GS_VERT_ITEMSIZE_3:
3212 case SQ_GSVS_RING_OFFSET_1:
3213 case SQ_GSVS_RING_OFFSET_2:
3214 case SQ_GSVS_RING_OFFSET_3:
3215 case SQ_HSTMP_RING_ITEMSIZE:
3216 case SQ_LSTMP_RING_ITEMSIZE:
3217 case SQ_PSTMP_RING_ITEMSIZE:
3218 case SQ_VSTMP_RING_ITEMSIZE:
3219 case VGT_TF_RING_SIZE:
3220 case SQ_ESGS_RING_BASE:
3221 case SQ_GSVS_RING_BASE:
3222 case SQ_ESTMP_RING_BASE:
3223 case SQ_GSTMP_RING_BASE:
3224 case SQ_HSTMP_RING_BASE:
3225 case SQ_LSTMP_RING_BASE:
3226 case SQ_PSTMP_RING_BASE:
3227 case SQ_VSTMP_RING_BASE:
3228 case CAYMAN_VGT_OFFCHIP_LDS_BASE:
3229 case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
3232 DRM_ERROR("Invalid register 0x%x in CS\n", reg);
3237 static int evergreen_vm_packet3_check(struct radeon_device *rdev,
3238 u32 *ib, struct radeon_cs_packet *pkt)
3240 u32 idx = pkt->idx + 1;
3241 u32 idx_value = ib[idx];
3242 u32 start_reg, end_reg, reg, i;
3245 switch (pkt->opcode) {
3247 case PACKET3_SET_BASE:
3248 case PACKET3_CLEAR_STATE:
3249 case PACKET3_INDEX_BUFFER_SIZE:
3250 case PACKET3_DISPATCH_DIRECT:
3251 case PACKET3_DISPATCH_INDIRECT:
3252 case PACKET3_MODE_CONTROL:
3253 case PACKET3_SET_PREDICATION:
3254 case PACKET3_COND_EXEC:
3255 case PACKET3_PRED_EXEC:
3256 case PACKET3_DRAW_INDIRECT:
3257 case PACKET3_DRAW_INDEX_INDIRECT:
3258 case PACKET3_INDEX_BASE:
3259 case PACKET3_DRAW_INDEX_2:
3260 case PACKET3_CONTEXT_CONTROL:
3261 case PACKET3_DRAW_INDEX_OFFSET:
3262 case PACKET3_INDEX_TYPE:
3263 case PACKET3_DRAW_INDEX:
3264 case PACKET3_DRAW_INDEX_AUTO:
3265 case PACKET3_DRAW_INDEX_IMMD:
3266 case PACKET3_NUM_INSTANCES:
3267 case PACKET3_DRAW_INDEX_MULTI_AUTO:
3268 case PACKET3_STRMOUT_BUFFER_UPDATE:
3269 case PACKET3_DRAW_INDEX_OFFSET_2:
3270 case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
3271 case PACKET3_MPEG_INDEX:
3272 case PACKET3_WAIT_REG_MEM:
3273 case PACKET3_MEM_WRITE:
3274 case PACKET3_SURFACE_SYNC:
3275 case PACKET3_EVENT_WRITE:
3276 case PACKET3_EVENT_WRITE_EOP:
3277 case PACKET3_EVENT_WRITE_EOS:
3278 case PACKET3_SET_CONTEXT_REG:
3279 case PACKET3_SET_BOOL_CONST:
3280 case PACKET3_SET_LOOP_CONST:
3281 case PACKET3_SET_RESOURCE:
3282 case PACKET3_SET_SAMPLER:
3283 case PACKET3_SET_CTL_CONST:
3284 case PACKET3_SET_RESOURCE_OFFSET:
3285 case PACKET3_SET_CONTEXT_REG_INDIRECT:
3286 case PACKET3_SET_RESOURCE_INDIRECT:
3287 case CAYMAN_PACKET3_DEALLOC_STATE:
3289 case PACKET3_COND_WRITE:
3290 if (idx_value & 0x100) {
3291 reg = ib[idx + 5] * 4;
3292 if (!evergreen_vm_reg_valid(reg))
3293 return -EINVAL;
3296 case PACKET3_COPY_DW:
3297 if (idx_value & 0x2) {
3298 reg = ib[idx + 3] * 4;
3299 if (!evergreen_vm_reg_valid(reg))
3300 return -EINVAL;
3303 case PACKET3_SET_CONFIG_REG:
3304 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
3305 end_reg = 4 * pkt->count + start_reg - 4;
3306 if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
3307 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
3308 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
3309 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
3312 for (i = 0; i < pkt->count; i++) {
3313 reg = start_reg + (4 * i);
3314 if (!evergreen_vm_reg_valid(reg))
3315 return -EINVAL;
3318 case PACKET3_CP_DMA:
3319 command = ib[idx + 4];
3320 info = ib[idx + 1];
3321 if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
3322 (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
3323 ((((info & 0x00300000) >> 20) == 0) &&
3324 (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
3325 ((((info & 0x60000000) >> 29) == 0) &&
3326 (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
3327 /* non mem-to-mem copies require a dword-aligned count */
3328 if ((command & 0x1fffff) % 4) {
3329 DRM_ERROR("CP DMA command requires dw count alignment\n");
3333 if (command & PACKET3_CP_DMA_CMD_SAS) {
3334 /* src address space is register */
3335 if (((info & 0x60000000) >> 29) == 0) {
3336 start_reg = idx_value << 2;
3337 if (command & PACKET3_CP_DMA_CMD_SAIC) {
3338 reg = start_reg;
3339 if (!evergreen_vm_reg_valid(reg)) {
3340 DRM_ERROR("CP DMA Bad SRC register\n");
3344 for (i = 0; i < (command & 0x1fffff); i++) {
3345 reg = start_reg + (4 * i);
3346 if (!evergreen_vm_reg_valid(reg)) {
3347 DRM_ERROR("CP DMA Bad SRC register\n");
3354 if (command & PACKET3_CP_DMA_CMD_DAS) {
3355 /* dst address space is register */
3356 if (((info & 0x00300000) >> 20) == 0) {
3357 start_reg = ib[idx + 2];
3358 if (command & PACKET3_CP_DMA_CMD_DAIC) {
3359 reg = start_reg;
3360 if (!evergreen_vm_reg_valid(reg)) {
3361 DRM_ERROR("CP DMA Bad DST register\n");
3365 for (i = 0; i < (command & 0x1fffff); i++) {
3366 reg = start_reg + (4 * i);
3367 if (!evergreen_vm_reg_valid(reg)) {
3368 DRM_ERROR("CP DMA Bad DST register\n");
3382 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
3386 struct radeon_cs_packet pkt;
3390 pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
3391 pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
3394 case RADEON_PACKET_TYPE0:
3395 dev_err(rdev->dev, "Packet0 not allowed!\n");
3398 case RADEON_PACKET_TYPE2:
3401 case RADEON_PACKET_TYPE3:
3402 pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
3403 ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
3404 idx += pkt.count + 2;
3407 dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
3413 } while (idx < ib->length_dw);
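/*
 * Header decode sketch for the loop above, per the
 * RADEON_CP_PACKET_GET_* macros: a CP packet header carries the type
 * in bits 31..30, the body dword count minus one in bits 29..16 and,
 * for type-3 packets, the opcode in bits 15..8; "idx += pkt.count + 2"
 * therefore steps over the header plus count + 1 body dwords.
 */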
3419 * evergreen_dma_ib_parse() - parse the DMA IB for VM
3420 * @rdev: radeon_device pointer
3421 * @ib: radeon_ib pointer
3423 * Parses the DMA IB from the VM CS ioctl and
3424 * checks for errors. (Cayman-SI)
3425 * Returns 0 for success and an error on failure.
3427 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
3430 u32 header, cmd, count, sub_cmd;
3433 header = ib->ptr[idx];
3434 cmd = GET_DMA_CMD(header);
3435 count = GET_DMA_COUNT(header);
3436 sub_cmd = GET_DMA_SUB_CMD(header);
3439 case DMA_PACKET_WRITE:
3450 DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
3454 case DMA_PACKET_COPY:
3456 /* Copy L2L, DW aligned */
3464 /* Copy L2L, byte aligned */
3468 /* Copy L2L, partial */
3472 /* Copy L2L, DW aligned, broadcast */
3476 /* Copy L2T Frame to Field */
3480 /* Copy L2T/T2L, partial */
3484 /* Copy L2T broadcast */
3488 /* Copy L2T/T2L (tile units) */
3492 /* Copy T2T, partial (tile units) */
3496 /* Copy L2T broadcast (tile units) */
3501 DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
3505 case DMA_PACKET_CONSTANT_FILL:
3508 case DMA_PACKET_NOP:
3512 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
3515 } while (idx < ib->length_dw);