/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm2/radeon/evergreen_cs.c 254885 2013-08-25 19:37:15Z dumbbell $");

#include <dev/drm2/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
#define MAX(a,b)	(((a)>(b))?(a):(b))
#define MIN(a,b)	(((a)<(b))?(a):(b))

struct evergreen_cs_track {
    u32 group_size;
    u32 nbanks;
    u32 npipes;
    u32 row_size;
    /* value we track */
    u32 nsamples; /* unused */
    struct radeon_bo *cb_color_bo[12];
    u32 cb_color_bo_offset[12];
    struct radeon_bo *cb_color_fmask_bo[8]; /* unused */
    struct radeon_bo *cb_color_cmask_bo[8]; /* unused */
    u32 cb_color_info[12];
    u32 cb_color_view[12];
    u32 cb_color_pitch[12];
    u32 cb_color_slice[12];
    u32 cb_color_slice_idx[12];
    u32 cb_color_attrib[12];
    u32 cb_color_cmask_slice[8];/* unused */
    u32 cb_color_fmask_slice[8];/* unused */
    u32 cb_target_mask;
    u32 cb_shader_mask; /* unused */
    u32 vgt_strmout_config;
    u32 vgt_strmout_buffer_config;
    struct radeon_bo *vgt_strmout_bo[4];
    u32 vgt_strmout_bo_offset[4];
    u32 vgt_strmout_size[4];
    u32 db_depth_control;
    u32 db_depth_view;
    u32 db_depth_slice;
    u32 db_depth_size;
    u32 db_z_info;
    u32 db_z_read_offset;
    u32 db_z_write_offset;
    struct radeon_bo *db_z_read_bo;
    struct radeon_bo *db_z_write_bo;
    u32 db_s_info;
    u32 db_s_read_offset;
    u32 db_s_write_offset;
    struct radeon_bo *db_s_read_bo;
    struct radeon_bo *db_s_write_bo;
    bool sx_misc_kill_all_prims;
    bool cb_dirty;
    bool db_dirty;
    bool streamout_dirty;
    u32 htile_offset;
    u32 htile_surface;
    struct radeon_bo *htile_bo;
};

static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
{
    if (tiling_flags & RADEON_TILING_MACRO)
        return ARRAY_2D_TILED_THIN1;
    else if (tiling_flags & RADEON_TILING_MICRO)
        return ARRAY_1D_TILED_THIN1;
    else
        return ARRAY_LINEAR_GENERAL;
}
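/*
 * Convert the number of banks programmed by userspace (2/4/8/16) into the
 * ADDR_SURF_*_BANK encoding used in the tiling attrib fields, with the
 * 8-bank encoding as the default for anything unexpected.
 */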
static u32 evergreen_cs_get_num_banks(u32 nbanks)
{
    switch (nbanks) {
    case 2:
        return ADDR_SURF_2_BANK;
    case 4:
        return ADDR_SURF_4_BANK;
    case 8:
    default:
        return ADDR_SURF_8_BANK;
    case 16:
        return ADDR_SURF_16_BANK;
    }
}

static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
    int i;

    for (i = 0; i < 8; i++) {
        track->cb_color_fmask_bo[i] = NULL;
        track->cb_color_cmask_bo[i] = NULL;
        track->cb_color_cmask_slice[i] = 0;
        track->cb_color_fmask_slice[i] = 0;
    }

    for (i = 0; i < 12; i++) {
        track->cb_color_bo[i] = NULL;
        track->cb_color_bo_offset[i] = 0xFFFFFFFF;
        track->cb_color_info[i] = 0;
        track->cb_color_view[i] = 0xFFFFFFFF;
        track->cb_color_pitch[i] = 0;
        track->cb_color_slice[i] = 0xfffffff;
        track->cb_color_slice_idx[i] = 0;
    }
    track->cb_target_mask = 0xFFFFFFFF;
    track->cb_shader_mask = 0xFFFFFFFF;
    track->cb_dirty = true;

    track->db_depth_slice = 0xffffffff;
    track->db_depth_view = 0xFFFFC000;
    track->db_depth_size = 0xFFFFFFFF;
    track->db_depth_control = 0xFFFFFFFF;
    track->db_z_info = 0xFFFFFFFF;
    track->db_z_read_offset = 0xFFFFFFFF;
    track->db_z_write_offset = 0xFFFFFFFF;
    track->db_z_read_bo = NULL;
    track->db_z_write_bo = NULL;
    track->db_s_info = 0xFFFFFFFF;
    track->db_s_read_offset = 0xFFFFFFFF;
    track->db_s_write_offset = 0xFFFFFFFF;
    track->db_s_read_bo = NULL;
    track->db_s_write_bo = NULL;
    track->db_dirty = true;
    track->htile_bo = NULL;
    track->htile_offset = 0xFFFFFFFF;
    track->htile_surface = 0;

    for (i = 0; i < 4; i++) {
        track->vgt_strmout_size[i] = 0;
        track->vgt_strmout_bo[i] = NULL;
        track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
    }
    track->streamout_dirty = true;
    track->sx_misc_kill_all_prims = false;
}

struct eg_surface {
    /* value gathered from cs */
    unsigned nbx;
    unsigned nby;
    unsigned format;
    unsigned mode;
    unsigned nbanks;
    unsigned bankw;
    unsigned bankh;
    unsigned tsplit;
    unsigned mtilea;
    unsigned nsamples;
    /* output value */
    unsigned bpe; /* pixel size */
    unsigned layer_size;
    unsigned palign;
    unsigned halign;
    unsigned long base_align;
};
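/*
 * The evergreen_surface_check_*() helpers below fill in the "output"
 * half of eg_surface from the values gathered from the command stream.
 * As an illustrative example (values assumed, not taken from any real
 * command stream): a 256x256 linear-aligned surface with bpe = 4 and a
 * group size of 512 bytes gets palign = MAX(64, 512 / 4) = 128, so its
 * 256-pixel pitch passes the alignment test, and layer_size works out
 * to 256 * 256 * 4 = 262144 bytes per slice.
 */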
static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
                                          struct eg_surface *surf,
                                          const char *prefix)
{
    surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
    surf->base_align = surf->bpe;
    surf->palign = 1;
    surf->halign = 1;
    return 0;
}

static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
                                                  struct eg_surface *surf,
                                                  const char *prefix)
{
    struct evergreen_cs_track *track = p->track;
    unsigned palign;

    palign = MAX(64, track->group_size / surf->bpe);
    surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
    surf->base_align = track->group_size;
    surf->palign = palign;
    surf->halign = 1;
    if (surf->nbx & (palign - 1)) {
        if (prefix) {
            dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
                     __func__, __LINE__, prefix, surf->nbx, palign);
        }
        return -EINVAL;
    }
    return 0;
}

static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
                                      struct eg_surface *surf,
                                      const char *prefix)
{
    struct evergreen_cs_track *track = p->track;
    unsigned palign;

    palign = track->group_size / (8 * surf->bpe * surf->nsamples);
    palign = MAX(8, palign);
    surf->layer_size = surf->nbx * surf->nby * surf->bpe;
    surf->base_align = track->group_size;
    surf->palign = palign;
    surf->halign = 8;
    if ((surf->nbx & (palign - 1))) {
        if (prefix) {
            dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
                     __func__, __LINE__, prefix, surf->nbx, palign,
                     track->group_size, surf->bpe, surf->nsamples);
        }
        return -EINVAL;
    }
    if ((surf->nby & (8 - 1))) {
        if (prefix) {
            dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
                     __func__, __LINE__, prefix, surf->nby);
        }
        return -EINVAL;
    }
    return 0;
}
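/*
 * For 2D macro tiling the pitch/height alignment units are whole macro
 * tiles.  A macro tile is (8 * bankw * npipes) * mtilea pixels wide and
 * (8 * bankh * nbanks) / mtilea pixels tall.  With assumed values
 * npipes = 2, nbanks = 8, bankw = bankh = 1 and mtilea = 2, that gives
 * a 32x32 pixel macro tile, so both nbx and nby must be multiples of 32.
 */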
static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
                                      struct eg_surface *surf,
                                      const char *prefix)
{
    struct evergreen_cs_track *track = p->track;
    unsigned palign, halign, tileb, slice_pt;
    unsigned mtile_pr, mtile_ps, mtileb;

    tileb = 64 * surf->bpe * surf->nsamples;
    slice_pt = 1;
    if (tileb > surf->tsplit) {
        slice_pt = tileb / surf->tsplit;
    }
    tileb = tileb / slice_pt;
    /* macro tile width & height */
    palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
    halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
    mtileb = (palign / 8) * (halign / 8) * tileb;
    mtile_pr = surf->nbx / palign;
    mtile_ps = (mtile_pr * surf->nby) / halign;
    surf->layer_size = mtile_ps * mtileb * slice_pt;
    surf->base_align = (palign / 8) * (halign / 8) * tileb;
    surf->palign = palign;
    surf->halign = halign;

    if ((surf->nbx & (palign - 1))) {
        if (prefix) {
            dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
                     __func__, __LINE__, prefix, surf->nbx, palign);
        }
        return -EINVAL;
    }
    if ((surf->nby & (halign - 1))) {
        if (prefix) {
            dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
                     __func__, __LINE__, prefix, surf->nby, halign);
        }
        return -EINVAL;
    }

    return 0;
}

static int evergreen_surface_check(struct radeon_cs_parser *p,
                                   struct eg_surface *surf,
                                   const char *prefix)
{
    /* some common value computed here */
    surf->bpe = r600_fmt_get_blocksize(surf->format);

    switch (surf->mode) {
    case ARRAY_LINEAR_GENERAL:
        return evergreen_surface_check_linear(p, surf, prefix);
    case ARRAY_LINEAR_ALIGNED:
        return evergreen_surface_check_linear_aligned(p, surf, prefix);
    case ARRAY_1D_TILED_THIN1:
        return evergreen_surface_check_1d(p, surf, prefix);
    case ARRAY_2D_TILED_THIN1:
        return evergreen_surface_check_2d(p, surf, prefix);
    default:
        dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
                 __func__, __LINE__, prefix, surf->mode);
        return -EINVAL;
    }
}
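/*
 * The CB/DB/texture registers store these parameters as small enumerated
 * fields; decode them to real values (banks 0..3 -> 2..16, bank w/h and
 * macro tile aspect 0..3 -> 1..8, tile split 0..6 -> 64..4096 bytes)
 * before doing any of the size math above.
 */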
static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
                                              struct eg_surface *surf,
                                              const char *prefix)
{
    switch (surf->mode) {
    case ARRAY_2D_TILED_THIN1:
        break;
    case ARRAY_LINEAR_GENERAL:
    case ARRAY_LINEAR_ALIGNED:
    case ARRAY_1D_TILED_THIN1:
        return 0;
    default:
        dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
                 __func__, __LINE__, prefix, surf->mode);
        return -EINVAL;
    }

    switch (surf->nbanks) {
    case 0: surf->nbanks = 2; break;
    case 1: surf->nbanks = 4; break;
    case 2: surf->nbanks = 8; break;
    case 3: surf->nbanks = 16; break;
    default:
        dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
                 __func__, __LINE__, prefix, surf->nbanks);
        return -EINVAL;
    }
    switch (surf->bankw) {
    case 0: surf->bankw = 1; break;
    case 1: surf->bankw = 2; break;
    case 2: surf->bankw = 4; break;
    case 3: surf->bankw = 8; break;
    default:
        dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
                 __func__, __LINE__, prefix, surf->bankw);
        return -EINVAL;
    }
    switch (surf->bankh) {
    case 0: surf->bankh = 1; break;
    case 1: surf->bankh = 2; break;
    case 2: surf->bankh = 4; break;
    case 3: surf->bankh = 8; break;
    default:
        dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
                 __func__, __LINE__, prefix, surf->bankh);
        return -EINVAL;
    }
    switch (surf->mtilea) {
    case 0: surf->mtilea = 1; break;
    case 1: surf->mtilea = 2; break;
    case 2: surf->mtilea = 4; break;
    case 3: surf->mtilea = 8; break;
    default:
        dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
                 __func__, __LINE__, prefix, surf->mtilea);
        return -EINVAL;
    }
    switch (surf->tsplit) {
    case 0: surf->tsplit = 64; break;
    case 1: surf->tsplit = 128; break;
    case 2: surf->tsplit = 256; break;
    case 3: surf->tsplit = 512; break;
    case 4: surf->tsplit = 1024; break;
    case 5: surf->tsplit = 2048; break;
    case 6: surf->tsplit = 4096; break;
    default:
        dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
                 __func__, __LINE__, prefix, surf->tsplit);
        return -EINVAL;
    }
    return 0;
}
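/*
 * Color buffer pitch/slice registers are encoded as tile counts minus
 * one: PITCH is in units of 8 pixels and SLICE in units of 64 pixels.
 * A programmed pitch value of 127, for instance, describes a surface
 * (127 + 1) * 8 = 1024 pixels wide.
 */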
static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
{
    struct evergreen_cs_track *track = p->track;
    struct eg_surface surf;
    unsigned pitch, slice, mslice;
    unsigned long offset;
    int r;

    mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
    pitch = track->cb_color_pitch[id];
    slice = track->cb_color_slice[id];
    surf.nbx = (pitch + 1) * 8;
    surf.nby = ((slice + 1) * 64) / surf.nbx;
    surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
    surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
    surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
    surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
    surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
    surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
    surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
    surf.nsamples = 1;

    if (!r600_fmt_is_valid_color(surf.format)) {
        dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
                 __func__, __LINE__, surf.format,
                 id, track->cb_color_info[id]);
        return -EINVAL;
    }

    r = evergreen_surface_value_conv_check(p, &surf, "cb");
    if (r) {
        return r;
    }

    r = evergreen_surface_check(p, &surf, "cb");
    if (r) {
        dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
                 __func__, __LINE__, id, track->cb_color_pitch[id],
                 track->cb_color_slice[id], track->cb_color_attrib[id],
                 track->cb_color_info[id]);
        return r;
    }

    offset = track->cb_color_bo_offset[id] << 8;
    if (offset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, id, offset, surf.base_align);
        return -EINVAL;
    }

    offset += surf.layer_size * mslice;
    if (offset > radeon_bo_size(track->cb_color_bo[id])) {
        /* old ddx are broken they allocate bo with w*h*bpp but
         * program slice with ALIGN(h, 8), catch this and patch
         * the command stream instead of rejecting it
         */
        volatile u32 *ib = p->ib.ptr;
        unsigned long tmp, nby, bsize, size, min = 0;

        /* find the height the ddx wants */
        if (surf.nby > 8) {
            min = surf.nby - 8;
        }
        bsize = radeon_bo_size(track->cb_color_bo[id]);
        tmp = track->cb_color_bo_offset[id] << 8;
        for (nby = surf.nby; nby > min; nby--) {
            size = nby * surf.nbx * surf.bpe * surf.nsamples;
            if ((tmp + size * mslice) <= bsize) {
                break;
            }
        }
        if (nby > min) {
            surf.nby = nby;
            slice = ((nby * surf.nbx) / 64) - 1;
            if (!evergreen_surface_check(p, &surf, "cb")) {
                /* check if this one works */
                tmp += surf.layer_size * mslice;
                if (tmp <= bsize) {
                    ib[track->cb_color_slice_idx[id]] = slice;
                    goto old_ddx_ok;
                }
            }
        }
        dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
                 "offset %d, max layer %d, bo size %ld, slice %d)\n",
                 __func__, __LINE__, id, surf.layer_size,
                 track->cb_color_bo_offset[id] << 8, mslice,
                 radeon_bo_size(track->cb_color_bo[id]), slice);
        dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
                 __func__, __LINE__, surf.nbx, surf.nby,
                 surf.mode, surf.bpe, surf.nsamples,
                 surf.bankw, surf.bankh,
                 surf.tsplit, surf.mtilea);
        return -EINVAL;
    }
old_ddx_ok:
    return 0;
}
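/*
 * One htile dword covers an 8x8 pixel block, so after the per-pipe
 * rounding below the buffer must hold at least (nbx / 8) * (nby / 8) * 4
 * bytes, rounded up to an npipes * 2KB boundary, starting at
 * htile_offset.
 */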
static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
                                             unsigned nbx, unsigned nby)
{
    struct evergreen_cs_track *track = p->track;
    unsigned long size;

    if (track->htile_bo == NULL) {
        dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
                 __func__, __LINE__, track->db_z_info);
        return -EINVAL;
    }

    if (G_028ABC_LINEAR(track->htile_surface)) {
        /* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
        nbx = roundup(nbx, 16 * 8);
        /* height is npipes htiles aligned == npipes * 8 pixel aligned */
        nby = roundup(nby, track->npipes * 8);
    } else {
        /* always assume 8x8 htile */
        /* align is htile align * 8, htile align varies according to
         * number of pipes and tile width and nby
         */
        switch (track->npipes) {
        case 8:
            /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
            nbx = roundup(nbx, 64 * 8);
            nby = roundup(nby, 64 * 8);
            break;
        case 4:
            /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
            nbx = roundup(nbx, 64 * 8);
            nby = roundup(nby, 32 * 8);
            break;
        case 2:
            /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
            nbx = roundup(nbx, 32 * 8);
            nby = roundup(nby, 32 * 8);
            break;
        case 1:
            /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
            nbx = roundup(nbx, 32 * 8);
            nby = roundup(nby, 16 * 8);
            break;
        default:
            dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
                     __func__, __LINE__, track->npipes);
            return -EINVAL;
        }
    }
    /* compute number of htile */
    nbx = nbx >> 3;
    nby = nby >> 3;
    /* size must be aligned on npipes * 2K boundary */
    size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
    size += track->htile_offset;

    if (size > radeon_bo_size(track->htile_bo)) {
        dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
                 __func__, __LINE__, radeon_bo_size(track->htile_bo),
                 size, nbx, nby);
        return -EINVAL;
    }
    return 0;
}
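/*
 * Stencil is validated by recasting it as an equivalent color surface
 * (one byte per element) so the common evergreen_surface_check() paths
 * can be reused for the alignment and size math.
 */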
static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
{
    struct evergreen_cs_track *track = p->track;
    struct eg_surface surf;
    unsigned pitch, slice, mslice;
    unsigned long offset;
    int r;

    mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
    pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
    slice = track->db_depth_slice;
    surf.nbx = (pitch + 1) * 8;
    surf.nby = ((slice + 1) * 64) / surf.nbx;
    surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
    surf.format = G_028044_FORMAT(track->db_s_info);
    surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
    surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
    surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
    surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
    surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
    surf.nsamples = 1;

    if (surf.format != 1) {
        dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
                 __func__, __LINE__, surf.format);
        return -EINVAL;
    }
    /* replace by color format so we can use same code */
    surf.format = V_028C70_COLOR_8;

    r = evergreen_surface_value_conv_check(p, &surf, "stencil");
    if (r) {
        return r;
    }

    r = evergreen_surface_check(p, &surf, NULL);
    if (r) {
        /* old userspace doesn't compute proper depth/stencil alignment;
         * check that alignment against a bigger bytes-per-element and
         * only report if that alignment is wrong too.
         */
        surf.format = V_028C70_COLOR_8_8_8_8;
        r = evergreen_surface_check(p, &surf, "stencil");
        if (r) {
            dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
                     __func__, __LINE__, track->db_depth_size,
                     track->db_depth_slice, track->db_s_info, track->db_z_info);
            return r;
        }
    }

    offset = track->db_s_read_offset << 8;
    if (offset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, offset, surf.base_align);
        return -EINVAL;
    }
    offset += surf.layer_size * mslice;
    if (offset > radeon_bo_size(track->db_s_read_bo)) {
        dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
                 "offset %ld, max layer %d, bo size %ld)\n",
                 __func__, __LINE__, surf.layer_size,
                 (unsigned long)track->db_s_read_offset << 8, mslice,
                 radeon_bo_size(track->db_s_read_bo));
        dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
                 __func__, __LINE__, track->db_depth_size,
                 track->db_depth_slice, track->db_s_info, track->db_z_info);
        return -EINVAL;
    }

    offset = track->db_s_write_offset << 8;
    if (offset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, offset, surf.base_align);
        return -EINVAL;
    }
    offset += surf.layer_size * mslice;
    if (offset > radeon_bo_size(track->db_s_write_bo)) {
        dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
                 "offset %ld, max layer %d, bo size %ld)\n",
                 __func__, __LINE__, surf.layer_size,
                 (unsigned long)track->db_s_write_offset << 8, mslice,
                 radeon_bo_size(track->db_s_write_bo));
        return -EINVAL;
    }

    /* hyperz */
    if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
        r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
        if (r) {
            return r;
        }
    }

    return 0;
}
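/*
 * Depth is validated the same way as stencil: the Z format is mapped to
 * a color format with the same block size (Z_16 -> COLOR_16,
 * Z_24/Z_32_FLOAT -> COLOR_8_8_8_8) and run through the common surface
 * checks.
 */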
static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
{
    struct evergreen_cs_track *track = p->track;
    struct eg_surface surf;
    unsigned pitch, slice, mslice;
    unsigned long offset;
    int r;

    mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
    pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
    slice = track->db_depth_slice;
    surf.nbx = (pitch + 1) * 8;
    surf.nby = ((slice + 1) * 64) / surf.nbx;
    surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
    surf.format = G_028040_FORMAT(track->db_z_info);
    surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
    surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
    surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
    surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
    surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
    surf.nsamples = 1;

    switch (surf.format) {
    case V_028040_Z_16:
        surf.format = V_028C70_COLOR_16;
        break;
    case V_028040_Z_24:
    case V_028040_Z_32_FLOAT:
        surf.format = V_028C70_COLOR_8_8_8_8;
        break;
    default:
        dev_warn(p->dev, "%s:%d depth invalid format %d\n",
                 __func__, __LINE__, surf.format);
        return -EINVAL;
    }

    r = evergreen_surface_value_conv_check(p, &surf, "depth");
    if (r) {
        dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
                 __func__, __LINE__, track->db_depth_size,
                 track->db_depth_slice, track->db_z_info);
        return r;
    }

    r = evergreen_surface_check(p, &surf, "depth");
    if (r) {
        dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
                 __func__, __LINE__, track->db_depth_size,
                 track->db_depth_slice, track->db_z_info);
        return r;
    }

    offset = track->db_z_read_offset << 8;
    if (offset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, offset, surf.base_align);
        return -EINVAL;
    }
    offset += surf.layer_size * mslice;
    if (offset > radeon_bo_size(track->db_z_read_bo)) {
        dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
                 "offset %ld, max layer %d, bo size %ld)\n",
                 __func__, __LINE__, surf.layer_size,
                 (unsigned long)track->db_z_read_offset << 8, mslice,
                 radeon_bo_size(track->db_z_read_bo));
        return -EINVAL;
    }

    offset = track->db_z_write_offset << 8;
    if (offset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, offset, surf.base_align);
        return -EINVAL;
    }
    offset += surf.layer_size * mslice;
    if (offset > radeon_bo_size(track->db_z_write_bo)) {
        dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
                 "offset %ld, max layer %d, bo size %ld)\n",
                 __func__, __LINE__, surf.layer_size,
                 (unsigned long)track->db_z_write_offset << 8, mslice,
                 radeon_bo_size(track->db_z_write_bo));
        return -EINVAL;
    }

    /* hyperz */
    if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
        r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
        if (r) {
            return r;
        }
    }

    return 0;
}
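/*
 * Texture resources are described by an 8-dword descriptor in the
 * command stream; texdw[0..7] below carry the dimensions, pitch, base
 * and mip addresses, format and tiling fields, decoded with the same
 * style of G_03xxxx accessors used for the render target registers.
 */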
static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
                                               struct radeon_bo *texture,
                                               struct radeon_bo *mipmap,
                                               unsigned idx)
{
    struct eg_surface surf;
    unsigned long toffset, moffset;
    unsigned dim, llevel, mslice, width, height, depth, i;
    u32 texdw[8];
    int r;

    texdw[0] = radeon_get_ib_value(p, idx + 0);
    texdw[1] = radeon_get_ib_value(p, idx + 1);
    texdw[2] = radeon_get_ib_value(p, idx + 2);
    texdw[3] = radeon_get_ib_value(p, idx + 3);
    texdw[4] = radeon_get_ib_value(p, idx + 4);
    texdw[5] = radeon_get_ib_value(p, idx + 5);
    texdw[6] = radeon_get_ib_value(p, idx + 6);
    texdw[7] = radeon_get_ib_value(p, idx + 7);
    dim = G_030000_DIM(texdw[0]);
    llevel = G_030014_LAST_LEVEL(texdw[5]);
    mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
    width = G_030000_TEX_WIDTH(texdw[0]) + 1;
    height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
    depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
    surf.format = G_03001C_DATA_FORMAT(texdw[7]);
    surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
    surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
    surf.nby = r600_fmt_get_nblocksy(surf.format, height);
    surf.mode = G_030004_ARRAY_MODE(texdw[1]);
    surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
    surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
    surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
    surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
    surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
    surf.nsamples = 1;
    toffset = texdw[2] << 8;
    moffset = texdw[3] << 8;

    if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
        dev_warn(p->dev, "%s:%d texture invalid format %d\n",
                 __func__, __LINE__, surf.format);
        return -EINVAL;
    }
    switch (dim) {
    case V_030000_SQ_TEX_DIM_1D:
    case V_030000_SQ_TEX_DIM_2D:
    case V_030000_SQ_TEX_DIM_CUBEMAP:
    case V_030000_SQ_TEX_DIM_1D_ARRAY:
    case V_030000_SQ_TEX_DIM_2D_ARRAY:
        depth = 1;
        break;
    case V_030000_SQ_TEX_DIM_2D_MSAA:
    case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
        surf.nsamples = 1 << llevel;
        llevel = 0;
        depth = 1;
        break;
    case V_030000_SQ_TEX_DIM_3D:
        break;
    default:
        dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
                 __func__, __LINE__, dim);
        return -EINVAL;
    }

    r = evergreen_surface_value_conv_check(p, &surf, "texture");
    if (r) {
        return r;
    }

    /* align height */
    evergreen_surface_check(p, &surf, NULL);
    surf.nby = roundup(surf.nby, surf.halign);

    r = evergreen_surface_check(p, &surf, "texture");
    if (r) {
        dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
                 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
                 texdw[5], texdw[6], texdw[7]);
        return r;
    }

    /* check texture size */
    if (toffset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, toffset, surf.base_align);
        return -EINVAL;
    }
    if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
        dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
                 __func__, __LINE__, moffset, surf.base_align);
        return -EINVAL;
    }
    if (dim == SQ_TEX_DIM_3D) {
        toffset += surf.layer_size * depth;
    } else {
        toffset += surf.layer_size * mslice;
    }
    if (toffset > radeon_bo_size(texture)) {
        dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
                 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
                 __func__, __LINE__, surf.layer_size,
                 (unsigned long)texdw[2] << 8, mslice,
                 depth, radeon_bo_size(texture),
                 surf.nbx, surf.nby);
        return -EINVAL;
    }

    if (!mipmap) {
        if (llevel) {
            dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
                     __func__, __LINE__);
            return -EINVAL;
        } else {
            return 0; /* everything's ok */
        }
    }

    /* check mipmap size */
    for (i = 1; i <= llevel; i++) {
        unsigned w, h, d;

        w = r600_mip_minify(width, i);
        h = r600_mip_minify(height, i);
        d = r600_mip_minify(depth, i);
        surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
        surf.nby = r600_fmt_get_nblocksy(surf.format, h);

        switch (surf.mode) {
        case ARRAY_2D_TILED_THIN1:
            if (surf.nbx < surf.palign || surf.nby < surf.halign) {
                surf.mode = ARRAY_1D_TILED_THIN1;
                /* recompute alignment */
                evergreen_surface_check(p, &surf, NULL);
            }
            break;
        case ARRAY_LINEAR_GENERAL:
        case ARRAY_LINEAR_ALIGNED:
        case ARRAY_1D_TILED_THIN1:
            break;
        default:
            dev_warn(p->dev, "%s:%d invalid array mode %d\n",
                     __func__, __LINE__, surf.mode);
            return -EINVAL;
        }
        surf.nbx = roundup(surf.nbx, surf.palign);
        surf.nby = roundup(surf.nby, surf.halign);

        r = evergreen_surface_check(p, &surf, "mipmap");
        if (r) {
            return r;
        }

        if (dim == SQ_TEX_DIM_3D) {
            moffset += surf.layer_size * d;
        } else {
            moffset += surf.layer_size * mslice;
        }
        if (moffset > radeon_bo_size(mipmap)) {
            dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
                     "offset %ld, coffset %ld, max layer %d, depth %d, "
                     "bo size %ld) level0 (%d %d %d)\n",
                     __func__, __LINE__, i, surf.layer_size,
                     (unsigned long)texdw[3] << 8, moffset, mslice,
                     d, radeon_bo_size(mipmap),
                     width, height, depth);
            dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
                     __func__, __LINE__, surf.nbx, surf.nby,
                     surf.mode, surf.bpe, surf.nsamples,
                     surf.bankw, surf.bankh,
                     surf.tsplit, surf.mtilea);
            return -EINVAL;
        }
    }

    return 0;
}
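/*
 * Top-level state validation, run before each draw/dispatch: first the
 * streamout buffers for every enabled buffer_mask bit, then one color
 * buffer per enabled nibble of cb_target_mask, then stencil and depth
 * when they are enabled in db_depth_control.  The *_dirty flags make
 * this incremental so unchanged state is not re-checked.
 */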
static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
    struct evergreen_cs_track *track = p->track;
    unsigned tmp, i;
    int r;
    unsigned buffer_mask = 0;

    /* check streamout */
    if (track->streamout_dirty && track->vgt_strmout_config) {
        for (i = 0; i < 4; i++) {
            if (track->vgt_strmout_config & (1 << i)) {
                buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
            }
        }

        for (i = 0; i < 4; i++) {
            if (buffer_mask & (1 << i)) {
                if (track->vgt_strmout_bo[i]) {
                    u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
                            (u64)track->vgt_strmout_size[i];
                    if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
                        DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n",
                                  i, (uintmax_t)offset,
                                  radeon_bo_size(track->vgt_strmout_bo[i]));
                        return -EINVAL;
                    }
                } else {
                    dev_warn(p->dev, "No buffer for streamout %d\n", i);
                    return -EINVAL;
                }
            }
        }
        track->streamout_dirty = false;
    }

    if (track->sx_misc_kill_all_prims)
        return 0;

    /* check that we have a cb for each enabled target */
    if (track->cb_dirty) {
        tmp = track->cb_target_mask;
        for (i = 0; i < 8; i++) {
            if ((tmp >> (i * 4)) & 0xF) {
                /* at least one component is enabled */
                if (track->cb_color_bo[i] == NULL) {
                    dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
                             __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
                    return -EINVAL;
                }
                /* check cb */
                r = evergreen_cs_track_validate_cb(p, i);
                if (r) {
                    return r;
                }
            }
        }
        track->cb_dirty = false;
    }

    if (track->db_dirty) {
        /* Check stencil buffer */
        if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
            G_028800_STENCIL_ENABLE(track->db_depth_control)) {
            r = evergreen_cs_track_validate_stencil(p);
            if (r)
                return r;
        }
        /* Check depth buffer */
        if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
            G_028800_Z_ENABLE(track->db_depth_control)) {
            r = evergreen_cs_track_validate_depth(p);
            if (r)
                return r;
        }
        track->db_dirty = false;
    }

    return 0;
}

/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * This is an Evergreen(+)-specific function for parsing VLINE packets.
 * The real work is done by the r600_cs_common_vline_parse() function.
 * Here we just set up the ASIC-specific register table and call
 * the common implementation function.
 */
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
    static uint32_t vline_start_end[6] = {
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
    };
    static uint32_t vline_status[6] = {
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
    };

    return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}

static int evergreen_packet0_check(struct radeon_cs_parser *p,
                                   struct radeon_cs_packet *pkt,
                                   unsigned idx, unsigned reg)
{
    int r;

    switch (reg) {
    case EVERGREEN_VLINE_START_END:
        r = evergreen_cs_packet_parse_vline(p);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            return r;
        }
        break;
    default:
        DRM_ERROR("Forbidden register 0x%04X in cs at %d\n",
                  reg, idx);
        return -EINVAL;
    }
    return 0;
}

static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
                                      struct radeon_cs_packet *pkt)
{
    unsigned reg, i;
    unsigned idx;
    int r;

    idx = pkt->idx + 1;
    reg = pkt->reg;
    for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
        r = evergreen_packet0_check(p, pkt, idx, reg);
        if (r) {
            return r;
        }
    }
    return 0;
}

/**
 * evergreen_cs_check_reg() - check if a register is authorized or not
 * @parser:	parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe this
 * function will test it against a list of registers needing special
 * handling.
 */
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
    struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
    struct radeon_cs_reloc *reloc;
    u32 last_reg;
    u32 m, i, tmp, *ib;
    int r;

    if (p->rdev->family >= CHIP_CAYMAN)
        last_reg = DRM_ARRAY_SIZE(cayman_reg_safe_bm);
    else
        last_reg = DRM_ARRAY_SIZE(evergreen_reg_safe_bm);
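
    /* Each u32 of the safe-register bitmap covers 32 registers of 4
     * bytes each, i.e. 128 bytes of register space: bit (reg >> 2) & 31
     * of word reg >> 7 tells us whether the register needs no further
     * checking.
     */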
    i = (reg >> 7);
    if (i >= last_reg) {
        dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
        return -EINVAL;
    }
    m = 1 << ((reg >> 2) & 31);
    if (p->rdev->family >= CHIP_CAYMAN) {
        if (!(cayman_reg_safe_bm[i] & m))
            return 0;
    } else {
        if (!(evergreen_reg_safe_bm[i] & m))
            return 0;
    }
    ib = p->ib.ptr;
    switch (reg) {
    /* force following reg to 0 in an attempt to disable out buffer
     * which will need us to better understand how it works to perform
     * security check on it (Jerome)
     */
    case SQ_ESGS_RING_SIZE:
    case SQ_GSVS_RING_SIZE:
    case SQ_ESTMP_RING_SIZE:
    case SQ_GSTMP_RING_SIZE:
    case SQ_HSTMP_RING_SIZE:
    case SQ_LSTMP_RING_SIZE:
    case SQ_PSTMP_RING_SIZE:
    case SQ_VSTMP_RING_SIZE:
    case SQ_ESGS_RING_ITEMSIZE:
    case SQ_ESTMP_RING_ITEMSIZE:
    case SQ_GSTMP_RING_ITEMSIZE:
    case SQ_GSVS_RING_ITEMSIZE:
    case SQ_GS_VERT_ITEMSIZE:
    case SQ_GS_VERT_ITEMSIZE_1:
    case SQ_GS_VERT_ITEMSIZE_2:
    case SQ_GS_VERT_ITEMSIZE_3:
    case SQ_GSVS_RING_OFFSET_1:
    case SQ_GSVS_RING_OFFSET_2:
    case SQ_GSVS_RING_OFFSET_3:
    case SQ_HSTMP_RING_ITEMSIZE:
    case SQ_LSTMP_RING_ITEMSIZE:
    case SQ_PSTMP_RING_ITEMSIZE:
    case SQ_VSTMP_RING_ITEMSIZE:
    case VGT_TF_RING_SIZE:
        /* get value to populate the IB don't remove */
        /*tmp =radeon_get_ib_value(p, idx);
        ib[idx] = 0;*/
        break;
    case SQ_ESGS_RING_BASE:
    case SQ_GSVS_RING_BASE:
    case SQ_ESTMP_RING_BASE:
    case SQ_GSTMP_RING_BASE:
    case SQ_HSTMP_RING_BASE:
    case SQ_LSTMP_RING_BASE:
    case SQ_PSTMP_RING_BASE:
    case SQ_VSTMP_RING_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        break;
    case DB_DEPTH_CONTROL:
        track->db_depth_control = radeon_get_ib_value(p, idx);
        track->db_dirty = true;
        break;
    case CAYMAN_DB_EQAA:
        if (p->rdev->family < CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        break;
    case CAYMAN_DB_DEPTH_INFO:
        if (p->rdev->family < CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        break;
    case DB_Z_INFO:
        track->db_z_info = radeon_get_ib_value(p, idx);
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            r = radeon_cs_packet_next_reloc(p, &reloc, 0);
            if (r) {
                dev_warn(p->dev, "bad SET_CONTEXT_REG "
                         "0x%04X\n", reg);
                return -EINVAL;
            }
            ib[idx] &= ~Z_ARRAY_MODE(0xf);
            track->db_z_info &= ~Z_ARRAY_MODE(0xf);
            ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
            track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
                unsigned bankw, bankh, mtaspect, tile_split;

                evergreen_tiling_fields(reloc->lobj.tiling_flags,
                                        &bankw, &bankh, &mtaspect,
                                        &tile_split);
                ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
                ib[idx] |= DB_TILE_SPLIT(tile_split) |
                           DB_BANK_WIDTH(bankw) |
                           DB_BANK_HEIGHT(bankh) |
                           DB_MACRO_TILE_ASPECT(mtaspect);
            }
        }
        track->db_dirty = true;
        break;
    case DB_STENCIL_INFO:
        track->db_s_info = radeon_get_ib_value(p, idx);
        track->db_dirty = true;
        break;
    case DB_DEPTH_VIEW:
        track->db_depth_view = radeon_get_ib_value(p, idx);
        track->db_dirty = true;
        break;
    case DB_DEPTH_SIZE:
        track->db_depth_size = radeon_get_ib_value(p, idx);
        track->db_dirty = true;
        break;
    case R_02805C_DB_DEPTH_SLICE:
        track->db_depth_slice = radeon_get_ib_value(p, idx);
        track->db_dirty = true;
        break;
    case DB_Z_READ_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_z_read_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_z_read_bo = reloc->robj;
        track->db_dirty = true;
        break;
    case DB_Z_WRITE_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_z_write_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_z_write_bo = reloc->robj;
        track->db_dirty = true;
        break;
    case DB_STENCIL_READ_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_s_read_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_s_read_bo = reloc->robj;
        track->db_dirty = true;
        break;
    case DB_STENCIL_WRITE_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_s_write_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_s_write_bo = reloc->robj;
        track->db_dirty = true;
        break;
    case VGT_STRMOUT_CONFIG:
        track->vgt_strmout_config = radeon_get_ib_value(p, idx);
        track->streamout_dirty = true;
        break;
    case VGT_STRMOUT_BUFFER_CONFIG:
        track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
        track->streamout_dirty = true;
        break;
    case VGT_STRMOUT_BUFFER_BASE_0:
    case VGT_STRMOUT_BUFFER_BASE_1:
    case VGT_STRMOUT_BUFFER_BASE_2:
    case VGT_STRMOUT_BUFFER_BASE_3:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
        track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->vgt_strmout_bo[tmp] = reloc->robj;
        track->streamout_dirty = true;
        break;
    case VGT_STRMOUT_BUFFER_SIZE_0:
    case VGT_STRMOUT_BUFFER_SIZE_1:
    case VGT_STRMOUT_BUFFER_SIZE_2:
    case VGT_STRMOUT_BUFFER_SIZE_3:
        tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
        /* size in register is DWs, convert to bytes */
        track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
        track->streamout_dirty = true;
        break;
    case CP_COHER_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
    case CB_TARGET_MASK:
        track->cb_target_mask = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case CB_SHADER_MASK:
        track->cb_shader_mask = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case PA_SC_AA_CONFIG:
        if (p->rdev->family >= CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
        track->nsamples = 1 << tmp;
        break;
    case CAYMAN_PA_SC_AA_CONFIG:
        if (p->rdev->family < CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
        track->nsamples = 1 << tmp;
        break;
    case CB_COLOR0_VIEW:
    case CB_COLOR1_VIEW:
    case CB_COLOR2_VIEW:
    case CB_COLOR3_VIEW:
    case CB_COLOR4_VIEW:
    case CB_COLOR5_VIEW:
    case CB_COLOR6_VIEW:
    case CB_COLOR7_VIEW:
        tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
        track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case CB_COLOR8_VIEW:
    case CB_COLOR9_VIEW:
    case CB_COLOR10_VIEW:
    case CB_COLOR11_VIEW:
        tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
        track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case CB_COLOR0_INFO:
    case CB_COLOR1_INFO:
    case CB_COLOR2_INFO:
    case CB_COLOR3_INFO:
    case CB_COLOR4_INFO:
    case CB_COLOR5_INFO:
    case CB_COLOR6_INFO:
    case CB_COLOR7_INFO:
        tmp = (reg - CB_COLOR0_INFO) / 0x3c;
        track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            r = radeon_cs_packet_next_reloc(p, &reloc, 0);
            if (r) {
                dev_warn(p->dev, "bad SET_CONTEXT_REG "
                         "0x%04X\n", reg);
                return -EINVAL;
            }
            ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
            track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
        }
        track->cb_dirty = true;
        break;
    case CB_COLOR8_INFO:
    case CB_COLOR9_INFO:
    case CB_COLOR10_INFO:
    case CB_COLOR11_INFO:
        tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
        track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            r = radeon_cs_packet_next_reloc(p, &reloc, 0);
            if (r) {
                dev_warn(p->dev, "bad SET_CONTEXT_REG "
                         "0x%04X\n", reg);
                return -EINVAL;
            }
            ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
            track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
        }
        track->cb_dirty = true;
        break;
    case CB_COLOR0_PITCH:
    case CB_COLOR1_PITCH:
    case CB_COLOR2_PITCH:
    case CB_COLOR3_PITCH:
    case CB_COLOR4_PITCH:
    case CB_COLOR5_PITCH:
    case CB_COLOR6_PITCH:
    case CB_COLOR7_PITCH:
        tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
        track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case CB_COLOR8_PITCH:
    case CB_COLOR9_PITCH:
    case CB_COLOR10_PITCH:
    case CB_COLOR11_PITCH:
        tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
        track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
        track->cb_dirty = true;
        break;
    case CB_COLOR0_SLICE:
    case CB_COLOR1_SLICE:
    case CB_COLOR2_SLICE:
    case CB_COLOR3_SLICE:
    case CB_COLOR4_SLICE:
    case CB_COLOR5_SLICE:
    case CB_COLOR6_SLICE:
    case CB_COLOR7_SLICE:
        tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
        track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
        track->cb_color_slice_idx[tmp] = idx;
        track->cb_dirty = true;
        break;
    case CB_COLOR8_SLICE:
    case CB_COLOR9_SLICE:
    case CB_COLOR10_SLICE:
    case CB_COLOR11_SLICE:
        tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
        track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
        track->cb_color_slice_idx[tmp] = idx;
        track->cb_dirty = true;
        break;
    case CB_COLOR0_ATTRIB:
    case CB_COLOR1_ATTRIB:
    case CB_COLOR2_ATTRIB:
    case CB_COLOR3_ATTRIB:
    case CB_COLOR4_ATTRIB:
    case CB_COLOR5_ATTRIB:
    case CB_COLOR6_ATTRIB:
    case CB_COLOR7_ATTRIB:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
                unsigned bankw, bankh, mtaspect, tile_split;

                evergreen_tiling_fields(reloc->lobj.tiling_flags,
                                        &bankw, &bankh, &mtaspect,
                                        &tile_split);
                ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
                ib[idx] |= CB_TILE_SPLIT(tile_split) |
                           CB_BANK_WIDTH(bankw) |
                           CB_BANK_HEIGHT(bankh) |
                           CB_MACRO_TILE_ASPECT(mtaspect);
            }
        }
        tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
        track->cb_color_attrib[tmp] = ib[idx];
        track->cb_dirty = true;
        break;
    case CB_COLOR8_ATTRIB:
    case CB_COLOR9_ATTRIB:
    case CB_COLOR10_ATTRIB:
    case CB_COLOR11_ATTRIB:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
            if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
                unsigned bankw, bankh, mtaspect, tile_split;

                evergreen_tiling_fields(reloc->lobj.tiling_flags,
                                        &bankw, &bankh, &mtaspect,
                                        &tile_split);
                ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
                ib[idx] |= CB_TILE_SPLIT(tile_split) |
                           CB_BANK_WIDTH(bankw) |
                           CB_BANK_HEIGHT(bankh) |
                           CB_MACRO_TILE_ASPECT(mtaspect);
            }
        }
        tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
        track->cb_color_attrib[tmp] = ib[idx];
        track->cb_dirty = true;
        break;
    case CB_COLOR0_FMASK:
    case CB_COLOR1_FMASK:
    case CB_COLOR2_FMASK:
    case CB_COLOR3_FMASK:
    case CB_COLOR4_FMASK:
    case CB_COLOR5_FMASK:
    case CB_COLOR6_FMASK:
    case CB_COLOR7_FMASK:
        tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_fmask_bo[tmp] = reloc->robj;
        break;
    case CB_COLOR0_CMASK:
    case CB_COLOR1_CMASK:
    case CB_COLOR2_CMASK:
    case CB_COLOR3_CMASK:
    case CB_COLOR4_CMASK:
    case CB_COLOR5_CMASK:
    case CB_COLOR6_CMASK:
    case CB_COLOR7_CMASK:
        tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_cmask_bo[tmp] = reloc->robj;
        break;
    case CB_COLOR0_FMASK_SLICE:
    case CB_COLOR1_FMASK_SLICE:
    case CB_COLOR2_FMASK_SLICE:
    case CB_COLOR3_FMASK_SLICE:
    case CB_COLOR4_FMASK_SLICE:
    case CB_COLOR5_FMASK_SLICE:
    case CB_COLOR6_FMASK_SLICE:
    case CB_COLOR7_FMASK_SLICE:
        tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
        track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
        break;
    case CB_COLOR0_CMASK_SLICE:
    case CB_COLOR1_CMASK_SLICE:
    case CB_COLOR2_CMASK_SLICE:
    case CB_COLOR3_CMASK_SLICE:
    case CB_COLOR4_CMASK_SLICE:
    case CB_COLOR5_CMASK_SLICE:
    case CB_COLOR6_CMASK_SLICE:
    case CB_COLOR7_CMASK_SLICE:
        tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
        track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
        break;
    case CB_COLOR0_BASE:
    case CB_COLOR1_BASE:
    case CB_COLOR2_BASE:
    case CB_COLOR3_BASE:
    case CB_COLOR4_BASE:
    case CB_COLOR5_BASE:
    case CB_COLOR6_BASE:
    case CB_COLOR7_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = (reg - CB_COLOR0_BASE) / 0x3c;
        track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_bo[tmp] = reloc->robj;
        track->cb_dirty = true;
        break;
    case CB_COLOR8_BASE:
    case CB_COLOR9_BASE:
    case CB_COLOR10_BASE:
    case CB_COLOR11_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
        track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_bo[tmp] = reloc->robj;
        track->cb_dirty = true;
        break;
    case DB_HTILE_DATA_BASE:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        track->htile_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->htile_bo = reloc->robj;
        track->db_dirty = true;
        break;
    case DB_HTILE_SURFACE:
        /* 8x8 only */
        track->htile_surface = radeon_get_ib_value(p, idx);
        /* force 8x8 htile width and height */
        ib[idx] |= 3;
        track->db_dirty = true;
        break;
    case CB_IMMED0_BASE:
    case CB_IMMED1_BASE:
    case CB_IMMED2_BASE:
    case CB_IMMED3_BASE:
    case CB_IMMED4_BASE:
    case CB_IMMED5_BASE:
    case CB_IMMED6_BASE:
    case CB_IMMED7_BASE:
    case CB_IMMED8_BASE:
    case CB_IMMED9_BASE:
    case CB_IMMED10_BASE:
    case CB_IMMED11_BASE:
    case SQ_PGM_START_FS:
    case SQ_PGM_START_ES:
    case SQ_PGM_START_VS:
    case SQ_PGM_START_GS:
    case SQ_PGM_START_PS:
    case SQ_PGM_START_HS:
    case SQ_PGM_START_LS:
    case SQ_CONST_MEM_BASE:
    case SQ_ALU_CONST_CACHE_GS_0:
    case SQ_ALU_CONST_CACHE_GS_1:
    case SQ_ALU_CONST_CACHE_GS_2:
    case SQ_ALU_CONST_CACHE_GS_3:
    case SQ_ALU_CONST_CACHE_GS_4:
    case SQ_ALU_CONST_CACHE_GS_5:
    case SQ_ALU_CONST_CACHE_GS_6:
    case SQ_ALU_CONST_CACHE_GS_7:
    case SQ_ALU_CONST_CACHE_GS_8:
    case SQ_ALU_CONST_CACHE_GS_9:
    case SQ_ALU_CONST_CACHE_GS_10:
    case SQ_ALU_CONST_CACHE_GS_11:
    case SQ_ALU_CONST_CACHE_GS_12:
    case SQ_ALU_CONST_CACHE_GS_13:
    case SQ_ALU_CONST_CACHE_GS_14:
    case SQ_ALU_CONST_CACHE_GS_15:
    case SQ_ALU_CONST_CACHE_PS_0:
    case SQ_ALU_CONST_CACHE_PS_1:
    case SQ_ALU_CONST_CACHE_PS_2:
    case SQ_ALU_CONST_CACHE_PS_3:
    case SQ_ALU_CONST_CACHE_PS_4:
    case SQ_ALU_CONST_CACHE_PS_5:
    case SQ_ALU_CONST_CACHE_PS_6:
    case SQ_ALU_CONST_CACHE_PS_7:
    case SQ_ALU_CONST_CACHE_PS_8:
    case SQ_ALU_CONST_CACHE_PS_9:
    case SQ_ALU_CONST_CACHE_PS_10:
    case SQ_ALU_CONST_CACHE_PS_11:
    case SQ_ALU_CONST_CACHE_PS_12:
    case SQ_ALU_CONST_CACHE_PS_13:
    case SQ_ALU_CONST_CACHE_PS_14:
    case SQ_ALU_CONST_CACHE_PS_15:
    case SQ_ALU_CONST_CACHE_VS_0:
    case SQ_ALU_CONST_CACHE_VS_1:
    case SQ_ALU_CONST_CACHE_VS_2:
    case SQ_ALU_CONST_CACHE_VS_3:
    case SQ_ALU_CONST_CACHE_VS_4:
    case SQ_ALU_CONST_CACHE_VS_5:
    case SQ_ALU_CONST_CACHE_VS_6:
    case SQ_ALU_CONST_CACHE_VS_7:
    case SQ_ALU_CONST_CACHE_VS_8:
    case SQ_ALU_CONST_CACHE_VS_9:
    case SQ_ALU_CONST_CACHE_VS_10:
    case SQ_ALU_CONST_CACHE_VS_11:
    case SQ_ALU_CONST_CACHE_VS_12:
    case SQ_ALU_CONST_CACHE_VS_13:
    case SQ_ALU_CONST_CACHE_VS_14:
    case SQ_ALU_CONST_CACHE_VS_15:
    case SQ_ALU_CONST_CACHE_HS_0:
    case SQ_ALU_CONST_CACHE_HS_1:
    case SQ_ALU_CONST_CACHE_HS_2:
    case SQ_ALU_CONST_CACHE_HS_3:
    case SQ_ALU_CONST_CACHE_HS_4:
    case SQ_ALU_CONST_CACHE_HS_5:
    case SQ_ALU_CONST_CACHE_HS_6:
    case SQ_ALU_CONST_CACHE_HS_7:
    case SQ_ALU_CONST_CACHE_HS_8:
    case SQ_ALU_CONST_CACHE_HS_9:
    case SQ_ALU_CONST_CACHE_HS_10:
    case SQ_ALU_CONST_CACHE_HS_11:
    case SQ_ALU_CONST_CACHE_HS_12:
    case SQ_ALU_CONST_CACHE_HS_13:
    case SQ_ALU_CONST_CACHE_HS_14:
    case SQ_ALU_CONST_CACHE_HS_15:
    case SQ_ALU_CONST_CACHE_LS_0:
    case SQ_ALU_CONST_CACHE_LS_1:
    case SQ_ALU_CONST_CACHE_LS_2:
    case SQ_ALU_CONST_CACHE_LS_3:
    case SQ_ALU_CONST_CACHE_LS_4:
    case SQ_ALU_CONST_CACHE_LS_5:
    case SQ_ALU_CONST_CACHE_LS_6:
    case SQ_ALU_CONST_CACHE_LS_7:
    case SQ_ALU_CONST_CACHE_LS_8:
    case SQ_ALU_CONST_CACHE_LS_9:
    case SQ_ALU_CONST_CACHE_LS_10:
    case SQ_ALU_CONST_CACHE_LS_11:
    case SQ_ALU_CONST_CACHE_LS_12:
    case SQ_ALU_CONST_CACHE_LS_13:
    case SQ_ALU_CONST_CACHE_LS_14:
    case SQ_ALU_CONST_CACHE_LS_15:
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        break;
    case SX_MEMORY_EXPORT_BASE:
        if (p->rdev->family >= CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONFIG_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONFIG_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        break;
    case CAYMAN_SX_SCATTER_EXPORT_BASE:
        if (p->rdev->family < CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG "
                     "0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        break;
    case SX_MISC:
        track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
        break;
    default:
        dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
        return -EINVAL;
    }
    return 0;
}
static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
    u32 last_reg, m, i;

    if (p->rdev->family >= CHIP_CAYMAN)
        last_reg = DRM_ARRAY_SIZE(cayman_reg_safe_bm);
    else
        last_reg = DRM_ARRAY_SIZE(evergreen_reg_safe_bm);

    i = (reg >> 7);
    if (i >= last_reg) {
        dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
        return false;
    }
    m = 1 << ((reg >> 2) & 31);
    if (p->rdev->family >= CHIP_CAYMAN) {
        if (!(cayman_reg_safe_bm[i] & m))
            return true;
    } else {
        if (!(evergreen_reg_safe_bm[i] & m))
            return true;
    }
    dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
    return false;
}
static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                   struct radeon_cs_packet *pkt)
{
    struct radeon_cs_reloc *reloc;
    struct evergreen_cs_track *track;
    volatile u32 *ib;
    unsigned idx;
    unsigned i;
    unsigned start_reg, end_reg, reg;
    int r;
    u32 idx_value;

    track = (struct evergreen_cs_track *)p->track;
    ib = p->ib.ptr;
    idx = pkt->idx + 1;
    idx_value = radeon_get_ib_value(p, idx);

    switch (pkt->opcode) {
    case PACKET3_SET_PREDICATION:
    {
        int pred_op;
        int tmp;
        uint64_t offset;

        if (pkt->count != 1) {
            DRM_ERROR("bad SET PREDICATION\n");
            return -EINVAL;
        }

        tmp = radeon_get_ib_value(p, idx + 1);
        pred_op = (tmp >> 16) & 0x7;

        /* for the clear predicate operation */
        if (pred_op == 0)
            return 0;

        if (pred_op > 2) {
            DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
            return -EINVAL;
        }

        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad SET PREDICATION\n");
            return -EINVAL;
        }

        offset = reloc->lobj.gpu_offset +
                 (idx_value & 0xfffffff0) +
                 ((u64)(tmp & 0xff) << 32);
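
        /* Rebuild the 40-bit GPU address: the low dword goes back into
         * ib[idx] and the high 8 bits replace the low byte of the
         * second dword, preserving the predicate control bits.
         */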
        ib[idx + 0] = offset;
        ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
    }
    break;
    case PACKET3_CONTEXT_CONTROL:
        if (pkt->count != 1) {
            DRM_ERROR("bad CONTEXT_CONTROL\n");
            return -EINVAL;
        }
        break;
    case PACKET3_INDEX_TYPE:
    case PACKET3_NUM_INSTANCES:
    case PACKET3_CLEAR_STATE:
        if (pkt->count) {
            DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
            return -EINVAL;
        }
        break;
    case CAYMAN_PACKET3_DEALLOC_STATE:
        if (p->rdev->family < CHIP_CAYMAN) {
            DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
            return -EINVAL;
        }
        if (pkt->count) {
            DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
            return -EINVAL;
        }
        break;
    case PACKET3_INDEX_BASE:
    {
        uint64_t offset;

        if (pkt->count != 1) {
            DRM_ERROR("bad INDEX_BASE\n");
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad INDEX_BASE\n");
            return -EINVAL;
        }

        offset = reloc->lobj.gpu_offset +
                 radeon_get_ib_value(p, idx+0) +
                 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

        ib[idx+0] = offset;
        ib[idx+1] = upper_32_bits(offset) & 0xff;

        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    }
    case PACKET3_DRAW_INDEX:
    {
        uint64_t offset;

        if (pkt->count != 3) {
            DRM_ERROR("bad DRAW_INDEX\n");
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad DRAW_INDEX\n");
            return -EINVAL;
        }

        offset = reloc->lobj.gpu_offset +
                 radeon_get_ib_value(p, idx+0) +
                 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

        ib[idx+0] = offset;
        ib[idx+1] = upper_32_bits(offset) & 0xff;

        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    }
    case PACKET3_DRAW_INDEX_2:
    {
        uint64_t offset;

        if (pkt->count != 4) {
            DRM_ERROR("bad DRAW_INDEX_2\n");
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad DRAW_INDEX_2\n");
            return -EINVAL;
        }

        offset = reloc->lobj.gpu_offset +
                 radeon_get_ib_value(p, idx+1) +
                 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

        ib[idx+1] = offset;
        ib[idx+2] = upper_32_bits(offset) & 0xff;

        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    }
    case PACKET3_DRAW_INDEX_AUTO:
        if (pkt->count != 1) {
            DRM_ERROR("bad DRAW_INDEX_AUTO\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_MULTI_AUTO:
        if (pkt->count != 2) {
            DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_IMMD:
        if (pkt->count < 2) {
            DRM_ERROR("bad DRAW_INDEX_IMMD\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_OFFSET:
        if (pkt->count != 2) {
            DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_OFFSET_2:
        if (pkt->count != 3) {
            DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_DISPATCH_DIRECT:
        if (pkt->count != 3) {
            DRM_ERROR("bad DISPATCH_DIRECT\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
            return r;
        }
        break;
    case PACKET3_DISPATCH_INDIRECT:
        if (pkt->count != 1) {
            DRM_ERROR("bad DISPATCH_INDIRECT\n");
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad DISPATCH_INDIRECT\n");
            return -EINVAL;
        }
        ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_WAIT_REG_MEM:
        if (pkt->count != 5) {
            DRM_ERROR("bad WAIT_REG_MEM\n");
            return -EINVAL;
        }
        /* bit 4 is reg (0) or mem (1) */
        if (idx_value & 0x10) {
            uint64_t offset;

            r = radeon_cs_packet_next_reloc(p, &reloc, 0);
            if (r) {
                DRM_ERROR("bad WAIT_REG_MEM\n");
                return -EINVAL;
            }

            offset = reloc->lobj.gpu_offset +
                     (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
                     ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

            ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
            ib[idx+2] = upper_32_bits(offset) & 0xff;
        } else if (idx_value & 0x100) {
            DRM_ERROR("cannot use PFP on REG wait\n");
            return -EINVAL;
        }
        break;
    case PACKET3_CP_DMA:
    {
        u32 command, size, info;
        u64 offset, tmp;

        if (pkt->count != 4) {
            DRM_ERROR("bad CP DMA\n");
            return -EINVAL;
        }
        command = radeon_get_ib_value(p, idx+4);
        size = command & 0x1fffff;
        info = radeon_get_ib_value(p, idx+1);
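
        /* info dword: bits 29-30 select the source (0 = memory), bits
         * 20-21 the destination.  Register and GDS transfers must move
         * a dword-aligned number of bytes, and only memory addresses
         * get relocations applied below.
         */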
        if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
            (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
            ((((info & 0x00300000) >> 20) == 0) &&
             (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
            ((((info & 0x60000000) >> 29) == 0) &&
             (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
            /* non mem-to-mem copies require a dw-aligned count */
            if (size % 4) {
                DRM_ERROR("CP DMA command requires dw count alignment\n");
                return -EINVAL;
            }
        }
        if (command & PACKET3_CP_DMA_CMD_SAS) {
            /* src address space is register */
            /* GDS is ok */
            if (((info & 0x60000000) >> 29) != 1) {
                DRM_ERROR("CP DMA SAS not supported\n");
                return -EINVAL;
            }
        } else {
            if (command & PACKET3_CP_DMA_CMD_SAIC) {
                DRM_ERROR("CP DMA SAIC only supported for registers\n");
                return -EINVAL;
            }
            /* src address space is memory */
            if (((info & 0x60000000) >> 29) == 0) {
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                if (r) {
                    DRM_ERROR("bad CP DMA SRC\n");
                    return -EINVAL;
                }

                tmp = radeon_get_ib_value(p, idx) +
                      ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

                offset = reloc->lobj.gpu_offset + tmp;

                if ((tmp + size) > radeon_bo_size(reloc->robj)) {
                    dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n",
                             (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
                    return -EINVAL;
                }

                ib[idx] = offset;
                ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
            } else if (((info & 0x60000000) >> 29) != 2) {
                DRM_ERROR("bad CP DMA SRC_SEL\n");
                return -EINVAL;
            }
        }
        if (command & PACKET3_CP_DMA_CMD_DAS) {
            /* dst address space is register */
            /* GDS is ok */
            if (((info & 0x00300000) >> 20) != 1) {
                DRM_ERROR("CP DMA DAS not supported\n");
                return -EINVAL;
            }
        } else {
            /* dst address space is memory */
            if (command & PACKET3_CP_DMA_CMD_DAIC) {
                DRM_ERROR("CP DMA DAIC only supported for registers\n");
                return -EINVAL;
            }
            if (((info & 0x00300000) >> 20) == 0) {
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                if (r) {
                    DRM_ERROR("bad CP DMA DST\n");
                    return -EINVAL;
                }

                tmp = radeon_get_ib_value(p, idx+2) +
                      ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);

                offset = reloc->lobj.gpu_offset + tmp;

                if ((tmp + size) > radeon_bo_size(reloc->robj)) {
                    dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n",
                             (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
                    return -EINVAL;
                }

                ib[idx+2] = offset;
                ib[idx+3] = upper_32_bits(offset) & 0xff;
            } else {
                DRM_ERROR("bad CP DMA DST_SEL\n");
                return -EINVAL;
            }
        }
        break;
    }
    case PACKET3_SURFACE_SYNC:
        if (pkt->count != 3) {
            DRM_ERROR("bad SURFACE_SYNC\n");
            return -EINVAL;
        }
        /* 0xffffffff/0x0 is flush all cache flag */
        if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
            radeon_get_ib_value(p, idx + 2) != 0) {
            r = radeon_cs_packet_next_reloc(p, &reloc, 0);
            if (r) {
                DRM_ERROR("bad SURFACE_SYNC\n");
                return -EINVAL;
            }
            ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        }
        break;
    case PACKET3_EVENT_WRITE:
        if (pkt->count != 2 && pkt->count != 0) {
            DRM_ERROR("bad EVENT_WRITE\n");
            return -EINVAL;
        }
        if (pkt->count) {
            uint64_t offset;

            r = radeon_cs_packet_next_reloc(p, &reloc, 0);
            if (r) {
                DRM_ERROR("bad EVENT_WRITE\n");
                return -EINVAL;
            }
            offset = reloc->lobj.gpu_offset +
                     (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
                     ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

            ib[idx+1] = offset & 0xfffffff8;
            ib[idx+2] = upper_32_bits(offset) & 0xff;
        }
        break;
    case PACKET3_EVENT_WRITE_EOP:
    {
        uint64_t offset;

        if (pkt->count != 4) {
            DRM_ERROR("bad EVENT_WRITE_EOP\n");
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad EVENT_WRITE_EOP\n");
            return -EINVAL;
        }

        offset = reloc->lobj.gpu_offset +
                 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
                 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

        ib[idx+1] = offset & 0xfffffffc;
        ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
        break;
    }
    case PACKET3_EVENT_WRITE_EOS:
    {
        uint64_t offset;

        if (pkt->count != 3) {
            DRM_ERROR("bad EVENT_WRITE_EOS\n");
            return -EINVAL;
        }
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("bad EVENT_WRITE_EOS\n");
            return -EINVAL;
        }

        offset = reloc->lobj.gpu_offset +
                 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
                 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

        ib[idx+1] = offset & 0xfffffffc;
        ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
        break;
    }
    case PACKET3_SET_CONFIG_REG:
        start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
        end_reg = 4 * pkt->count + start_reg - 4;
2247 if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
2248 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
2249 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
2250 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
2253 for (i = 0; i < pkt->count; i++) {
2254 reg = start_reg + (4 * i);
2255 r = evergreen_cs_check_reg(p, reg, idx+1+i);
2260 case PACKET3_SET_CONTEXT_REG:
2261 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
2262 end_reg = 4 * pkt->count + start_reg - 4;
2263 if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
2264 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
2265 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
2266 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
2269 for (i = 0; i < pkt->count; i++) {
2270 reg = start_reg + (4 * i);
2271 r = evergreen_cs_check_reg(p, reg, idx+1+i);
2276 case PACKET3_SET_RESOURCE:
2277 if (pkt->count % 8) {
2278 DRM_ERROR("bad SET_RESOURCE\n");
2281 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
2282 end_reg = 4 * pkt->count + start_reg - 4;
2283 if ((start_reg < PACKET3_SET_RESOURCE_START) ||
2284 (start_reg >= PACKET3_SET_RESOURCE_END) ||
2285 (end_reg >= PACKET3_SET_RESOURCE_END)) {
2286 DRM_ERROR("bad SET_RESOURCE\n");
2289 for (i = 0; i < (pkt->count / 8); i++) {
2290 struct radeon_bo *texture, *mipmap;
2291 u32 toffset, moffset;
2292 u32 size, offset, mip_address, tex_dim;
2294 switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
2295 case SQ_TEX_VTX_VALID_TEXTURE:
2297 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2299 DRM_ERROR("bad SET_RESOURCE (tex)\n");
2302 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
2303 ib[idx+1+(i*8)+1] |=
2304 TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
2305 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
2306 unsigned bankw, bankh, mtaspect, tile_split;
2308 evergreen_tiling_fields(reloc->lobj.tiling_flags,
2309 &bankw, &bankh, &mtaspect,
2311 ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
2312 ib[idx+1+(i*8)+7] |=
2313 TEX_BANK_WIDTH(bankw) |
2314 TEX_BANK_HEIGHT(bankh) |
2315 MACRO_TILE_ASPECT(mtaspect) |
2316 TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
2319 texture = reloc->robj;
2320 toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2323 tex_dim = ib[idx+1+(i*8)+0] & 0x7;
2324 mip_address = ib[idx+1+(i*8)+3];
2326 if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
2327 !mip_address &&
2328 !radeon_cs_packet_next_is_pkt3_nop(p)) {
2329 /* MIP_ADDRESS should point to FMASK for an MSAA texture.
2330 * It should be 0 if FMASK is disabled. */
2334 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2336 DRM_ERROR("bad SET_RESOURCE (tex)\n");
2339 moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2340 mipmap = reloc->robj;
2343 r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
2346 ib[idx+1+(i*8)+2] += toffset;
2347 ib[idx+1+(i*8)+3] += moffset;
2349 case SQ_TEX_VTX_VALID_BUFFER:
2353 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2355 DRM_ERROR("bad SET_RESOURCE (vtx)\n");
2358 offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
2359 size = radeon_get_ib_value(p, idx+1+(i*8)+1);
2360 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
2361 /* force size to size of the buffer */
2362 dev_warn(p->dev, "vbo resource seems too big for the bo\n");
2363 ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
2366 offset64 = reloc->lobj.gpu_offset + offset;
2367 ib[idx+1+(i*8)+0] = offset64;
2368 ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
2369 (upper_32_bits(offset64) & 0xff);
2372 case SQ_TEX_VTX_INVALID_TEXTURE:
2373 case SQ_TEX_VTX_INVALID_BUFFER:
2375 DRM_ERROR("bad SET_RESOURCE\n");
2380 case PACKET3_SET_ALU_CONST:
2381 /* XXX fix me ALU const buffers only */
2383 case PACKET3_SET_BOOL_CONST:
2384 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
2385 end_reg = 4 * pkt->count + start_reg - 4;
2386 if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
2387 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
2388 (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
2389 DRM_ERROR("bad SET_BOOL_CONST\n");
2393 case PACKET3_SET_LOOP_CONST:
2394 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
2395 end_reg = 4 * pkt->count + start_reg - 4;
2396 if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
2397 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
2398 (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
2399 DRM_ERROR("bad SET_LOOP_CONST\n");
2403 case PACKET3_SET_CTL_CONST:
2404 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
2405 end_reg = 4 * pkt->count + start_reg - 4;
2406 if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
2407 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
2408 (end_reg >= PACKET3_SET_CTL_CONST_END)) {
2409 DRM_ERROR("bad SET_CTL_CONST\n");
2413 case PACKET3_SET_SAMPLER:
2414 if (pkt->count % 3) {
2415 DRM_ERROR("bad SET_SAMPLER\n");
2418 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
2419 end_reg = 4 * pkt->count + start_reg - 4;
2420 if ((start_reg < PACKET3_SET_SAMPLER_START) ||
2421 (start_reg >= PACKET3_SET_SAMPLER_END) ||
2422 (end_reg >= PACKET3_SET_SAMPLER_END)) {
2423 DRM_ERROR("bad SET_SAMPLER\n");
2427 case PACKET3_STRMOUT_BUFFER_UPDATE:
2428 if (pkt->count != 4) {
2429 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
2432 /* Updating memory at DST_ADDRESS. */
2433 if (idx_value & 0x1) {
2435 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2437 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
2440 offset = radeon_get_ib_value(p, idx+1);
2441 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2442 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2443 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n",
2444 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2447 offset += reloc->lobj.gpu_offset;
2449 ib[idx+2] = upper_32_bits(offset) & 0xff;
2451 /* Reading data from SRC_ADDRESS. */
2452 if (((idx_value >> 1) & 0x3) == 2) {
2454 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2456 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
2459 offset = radeon_get_ib_value(p, idx+3);
2460 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2461 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2462 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n",
2463 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2466 offset += reloc->lobj.gpu_offset;
2468 ib[idx+4] = upper_32_bits(offset) & 0xff;
2471 case PACKET3_MEM_WRITE:
2475 if (pkt->count != 3) {
2476 DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2479 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2481 DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2484 offset = radeon_get_ib_value(p, idx+0);
2485 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2486 if (offset & 0x7) {
2487 DRM_ERROR("bad MEM_WRITE (address not qword aligned)\n");
2490 if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2491 DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n",
2492 (uintmax_t)offset + 8, radeon_bo_size(reloc->robj));
2495 offset += reloc->lobj.gpu_offset;
2497 ib[idx+1] = upper_32_bits(offset) & 0xff;
2500 case PACKET3_COPY_DW:
2501 if (pkt->count != 4) {
2502 DRM_ERROR("bad COPY_DW (invalid count)\n");
2505 if (idx_value & 0x1) {
2507 /* SRC is memory. */
2508 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2510 DRM_ERROR("bad COPY_DW (missing src reloc)\n");
2513 offset = radeon_get_ib_value(p, idx+1);
2514 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2515 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2516 DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n",
2517 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2520 offset += reloc->lobj.gpu_offset;
2522 ib[idx+2] = upper_32_bits(offset) & 0xff;
2525 reg = radeon_get_ib_value(p, idx+1) << 2;
2526 if (!evergreen_is_safe_reg(p, reg, idx+1))
2529 if (idx_value & 0x2) {
2531 /* DST is memory. */
2532 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2534 DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
2537 offset = radeon_get_ib_value(p, idx+3);
2538 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2539 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2540 DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n",
2541 (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2544 offset += reloc->lobj.gpu_offset;
2546 ib[idx+4] = upper_32_bits(offset) & 0xff;
2549 reg = radeon_get_ib_value(p, idx+3) << 2;
2550 if (!evergreen_is_safe_reg(p, reg, idx+3))
2557 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2563 int evergreen_cs_parse(struct radeon_cs_parser *p)
2565 struct radeon_cs_packet pkt;
2566 struct evergreen_cs_track *track;
2570 if (p->track == NULL) {
2571 /* initialize tracker, we are in kms */
2572 track = kmalloc(sizeof(*track), M_DRM,
2576 evergreen_cs_track_init(track);
2577 if (p->rdev->family >= CHIP_CAYMAN)
2578 tmp = p->rdev->config.cayman.tile_config;
2580 tmp = p->rdev->config.evergreen.tile_config;
2582 switch (tmp & 0xf) {
2598 switch ((tmp & 0xf0) >> 4) {
2611 switch ((tmp & 0xf00) >> 8) {
2613 track->group_size = 256;
2617 track->group_size = 512;
2621 switch ((tmp & 0xf000) >> 12) {
2623 track->row_size = 1;
2627 track->row_size = 2;
2630 track->row_size = 4;
2637 r = radeon_cs_packet_parse(p, &pkt, p->idx);
2639 drm_free(p->track, M_DRM);
2643 p->idx += pkt.count + 2;
2645 case RADEON_PACKET_TYPE0:
2646 r = evergreen_cs_parse_packet0(p, &pkt);
2648 case RADEON_PACKET_TYPE2:
2650 case RADEON_PACKET_TYPE3:
2651 r = evergreen_packet3_check(p, &pkt);
2654 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
2655 drm_free(p->track, M_DRM);
2660 drm_free(p->track, M_DRM);
2664 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2666 for (r = 0; r < p->ib.length_dw; r++) {
2667 DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]);
2671 drm_free(p->track, M_DRM);
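/*
 * Illustrative sketch (not part of the driver): the tile_config decode
 * above reads the asic tiling configuration as a series of 4-bit
 * fields; the code shown uses bits 11:8 for the cache line group size
 * (256 or 512 bytes) and bits 15:12 for the row size selector stored
 * in track->row_size.
 */
static inline u32 tile_config_field(u32 tile_config, unsigned int shift)
{
	return (tile_config >> shift) & 0xf;
}
/* e.g. tile_config_field(tmp, 8) drives track->group_size above */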
2677 * evergreen_dma_cs_parse() - parse the DMA IB
2678 * @p: parser structure holding parsing context.
2680 * Parses the DMA IB from the CS ioctl and updates
2681 * the GPU addresses based on the reloc information and
2682 * checks for errors. (Evergreen-Cayman)
2683 * Returns 0 for success and an error on failure.
2685 int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2687 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2688 struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
2689 u32 header, cmd, count, sub_cmd;
2690 volatile u32 *ib = p->ib.ptr;
2692 u64 src_offset, dst_offset, dst2_offset;
2696 if (p->idx >= ib_chunk->length_dw) {
2697 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2698 p->idx, ib_chunk->length_dw);
2702 header = radeon_get_ib_value(p, idx);
2703 cmd = GET_DMA_CMD(header);
2704 count = GET_DMA_COUNT(header);
2705 sub_cmd = GET_DMA_SUB_CMD(header);
2708 case DMA_PACKET_WRITE:
2709 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2711 DRM_ERROR("bad DMA_PACKET_WRITE\n");
2717 dst_offset = ib[idx+1];
2720 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2721 p->idx += count + 7;
2725 dst_offset = ib[idx+1];
2726 dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
2728 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2729 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2730 p->idx += count + 3;
2733 DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib[idx+0]);
2736 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2737 dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n",
2738 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2742 case DMA_PACKET_COPY:
2743 r = r600_dma_cs_next_reloc(p, &src_reloc);
2745 DRM_ERROR("bad DMA_PACKET_COPY\n");
2748 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2750 DRM_ERROR("bad DMA_PACKET_COPY\n");
2754 /* Copy L2L, DW aligned */
2757 src_offset = ib[idx+2];
2758 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
2759 dst_offset = ib[idx+1];
2760 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
2761 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2762 dev_warn(p->dev, "DMA L2L, dw src buffer too small (%ju %lu)\n",
2763 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2766 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2767 dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%ju %lu)\n",
2768 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2771 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2772 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2773 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2774 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2780 if (ib[idx + 2] & (1 << 31)) {
2781 /* tiled src, linear dst */
2782 src_offset = ib[idx+1];
2784 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2786 dst_offset = radeon_get_ib_value(p, idx + 7);
2787 dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
2788 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2789 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2791 /* linear src, tiled dst */
2792 src_offset = ib[idx+7];
2793 src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
2794 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2795 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2797 dst_offset = ib[idx+1];
2799 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2801 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2802 dev_warn(p->dev, "DMA L2T, src buffer too small (%ju %lu)\n",
2803 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2806 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2807 dev_warn(p->dev, "DMA L2T, dst buffer too small (%ju %lu)\n",
2808 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2813 /* Copy L2L, byte aligned */
2816 src_offset = ib[idx+2];
2817 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
2818 dst_offset = ib[idx+1];
2819 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
2820 if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
2821 dev_warn(p->dev, "DMA L2L, byte src buffer too small (%ju %lu)\n",
2822 (uintmax_t)src_offset + count, radeon_bo_size(src_reloc->robj));
2825 if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
2826 dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%ju %lu)\n",
2827 (uintmax_t)dst_offset + count, radeon_bo_size(dst_reloc->robj));
2830 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
2831 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
2832 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2833 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2836 /* Copy L2L, partial */
2839 if (p->family < CHIP_CAYMAN) {
2840 DRM_ERROR("L2L Partial is cayman only !\n");
2843 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
2844 ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2845 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
2846 ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2850 /* Copy L2L, DW aligned, broadcast */
2852 /* L2L, dw, broadcast */
2853 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
2855 DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
2858 dst_offset = ib[idx+1];
2859 dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
2860 dst2_offset = ib[idx+2];
2861 dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
2862 src_offset = ib[idx+3];
2863 src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
2864 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2865 dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%ju %lu)\n",
2866 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2869 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2870 dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%ju %lu)\n",
2871 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2874 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
2875 dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%ju %lu)\n",
2876 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2879 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2880 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
2881 ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2882 ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2883 ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
2884 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2887 /* Copy L2T Frame to Field */
2889 if (ib[idx + 2] & (1 << 31)) {
2890 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
2893 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
2895 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
2898 dst_offset = ib[idx+1];
2900 dst2_offset = ib[idx+2];
2902 src_offset = ib[idx+8];
2903 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
2904 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2905 dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%ju %lu)\n",
2906 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2909 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2910 dev_warn(p->dev, "DMA L2T, frame to fields dst buffer too small (%ju %lu)\n",
2911 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2914 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
2915 dev_warn(p->dev, "DMA L2T, frame to fields dst2 buffer too small (%ju %lu)\n",
2916 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2919 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2920 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
2921 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2922 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2925 /* Copy L2T/T2L, partial */
2927 /* L2T, T2L partial */
2928 if (p->family < CHIP_CAYMAN) {
2929 DRM_ERROR("L2T, T2L Partial is cayman only !\n");
2933 if (ib[idx + 2] & (1 << 31)) {
2934 /* tiled src, linear dst */
2935 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2937 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2938 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2940 /* linear src, tiled dst */
2941 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2942 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2944 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2948 /* Copy L2T broadcast */
2950 /* L2T, broadcast */
2951 if (ib[idx + 2] & (1 << 31)) {
2952 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
2955 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
2957 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
2960 dst_offset = ib[idx+1];
2962 dst2_offset = ib[idx+2];
2964 src_offset = ib[idx+8];
2965 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
2966 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2967 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n",
2968 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2971 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2972 dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%ju %lu)\n",
2973 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2976 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
2977 dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%ju %lu)\n",
2978 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2981 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2982 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
2983 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2984 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2987 /* Copy L2T/T2L (tile units) */
2991 if (ib[idx + 2] & (1 << 31)) {
2992 /* tiled src, linear dst */
2993 src_offset = ib[idx+1];
2995 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2997 dst_offset = ib[idx+7];
2998 dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
2999 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3000 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3002 /* linear src, tiled dst */
3003 src_offset = ib[idx+7];
3004 src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
3005 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3006 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3008 dst_offset = ib[idx+1];
3010 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3012 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3013 dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%ju %lu)\n",
3014 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3017 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3018 dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%ju %lu)\n",
3019 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3024 /* Copy T2T, partial (tile units) */
3027 if (p->family < CHIP_CAYMAN) {
3028 DRM_ERROR("L2T, T2L Partial is cayman only !\n");
3031 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3032 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3035 /* Copy L2T broadcast (tile units) */
3037 /* L2T, broadcast */
3038 if (ib[idx + 2] & (1 << 31)) {
3039 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3042 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
3044 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3047 dst_offset = ib[idx+1];
3049 dst2_offset = ib[idx+2];
3051 src_offset = ib[idx+8];
3052 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
3053 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3054 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%ju %lu)\n",
3055 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3058 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3059 dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%ju %lu)\n",
3060 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3063 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
3064 dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%ju %lu)\n",
3065 (uintmax_t)dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3068 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3069 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
3070 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3071 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3075 DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib[idx+0]);
3079 case DMA_PACKET_CONSTANT_FILL:
3080 r = r600_dma_cs_next_reloc(p, &dst_reloc);
3082 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
3085 dst_offset = radeon_get_ib_value(p, idx+1);
3086 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
3087 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3088 dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n",
3089 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3092 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3093 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
3096 case DMA_PACKET_NOP:
3100 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
3103 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
3105 for (r = 0; r < p->ib.length_dw; r++) {
3106 DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]);
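/*
 * Illustrative sketch (not part of the driver): the DMA fixups above
 * use two address encodings.  Tiled addresses are programmed in
 * 256-byte units, so the reloc base is folded in as gpu_offset >> 8;
 * linear addresses are byte addresses split into a dword-aligned low
 * dword plus a high byte carrying address bits 39:32.
 */
static inline u32 dma_tiled_base(u64 gpu_offset)
{
	return (u32)(gpu_offset >> 8);
}

static inline u32 dma_linear_base_lo(u64 gpu_offset)
{
	return (u32)(gpu_offset & 0xfffffffc);
}

static inline u32 dma_linear_base_hi(u64 gpu_offset)
{
	return upper_32_bits(gpu_offset) & 0xff;
}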
3114 static bool evergreen_vm_reg_valid(u32 reg)
3116 /* context regs are fine */
3120 /* check config regs */
3123 case GRBM_GFX_INDEX:
3124 case CP_STRMOUT_CNTL:
3127 case VGT_VTX_VECT_EJECT_REG:
3128 case VGT_CACHE_INVALIDATION:
3129 case VGT_GS_VERTEX_REUSE:
3130 case VGT_PRIMITIVE_TYPE:
3131 case VGT_INDEX_TYPE:
3132 case VGT_NUM_INDICES:
3133 case VGT_NUM_INSTANCES:
3134 case VGT_COMPUTE_DIM_X:
3135 case VGT_COMPUTE_DIM_Y:
3136 case VGT_COMPUTE_DIM_Z:
3137 case VGT_COMPUTE_START_X:
3138 case VGT_COMPUTE_START_Y:
3139 case VGT_COMPUTE_START_Z:
3140 case VGT_COMPUTE_INDEX:
3141 case VGT_COMPUTE_THREAD_GROUP_SIZE:
3142 case VGT_HS_OFFCHIP_PARAM:
3144 case PA_SU_LINE_STIPPLE_VALUE:
3145 case PA_SC_LINE_STIPPLE_STATE:
3147 case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
3148 case SQ_DYN_GPR_SIMD_LOCK_EN:
3150 case SQ_GPR_RESOURCE_MGMT_1:
3151 case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
3152 case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
3153 case SQ_CONST_MEM_BASE:
3154 case SQ_STATIC_THREAD_MGMT_1:
3155 case SQ_STATIC_THREAD_MGMT_2:
3156 case SQ_STATIC_THREAD_MGMT_3:
3157 case SPI_CONFIG_CNTL:
3158 case SPI_CONFIG_CNTL_1:
3165 case TD_PS_BORDER_COLOR_INDEX:
3166 case TD_PS_BORDER_COLOR_RED:
3167 case TD_PS_BORDER_COLOR_GREEN:
3168 case TD_PS_BORDER_COLOR_BLUE:
3169 case TD_PS_BORDER_COLOR_ALPHA:
3170 case TD_VS_BORDER_COLOR_INDEX:
3171 case TD_VS_BORDER_COLOR_RED:
3172 case TD_VS_BORDER_COLOR_GREEN:
3173 case TD_VS_BORDER_COLOR_BLUE:
3174 case TD_VS_BORDER_COLOR_ALPHA:
3175 case TD_GS_BORDER_COLOR_INDEX:
3176 case TD_GS_BORDER_COLOR_RED:
3177 case TD_GS_BORDER_COLOR_GREEN:
3178 case TD_GS_BORDER_COLOR_BLUE:
3179 case TD_GS_BORDER_COLOR_ALPHA:
3180 case TD_HS_BORDER_COLOR_INDEX:
3181 case TD_HS_BORDER_COLOR_RED:
3182 case TD_HS_BORDER_COLOR_GREEN:
3183 case TD_HS_BORDER_COLOR_BLUE:
3184 case TD_HS_BORDER_COLOR_ALPHA:
3185 case TD_LS_BORDER_COLOR_INDEX:
3186 case TD_LS_BORDER_COLOR_RED:
3187 case TD_LS_BORDER_COLOR_GREEN:
3188 case TD_LS_BORDER_COLOR_BLUE:
3189 case TD_LS_BORDER_COLOR_ALPHA:
3190 case TD_CS_BORDER_COLOR_INDEX:
3191 case TD_CS_BORDER_COLOR_RED:
3192 case TD_CS_BORDER_COLOR_GREEN:
3193 case TD_CS_BORDER_COLOR_BLUE:
3194 case TD_CS_BORDER_COLOR_ALPHA:
3195 case SQ_ESGS_RING_SIZE:
3196 case SQ_GSVS_RING_SIZE:
3197 case SQ_ESTMP_RING_SIZE:
3198 case SQ_GSTMP_RING_SIZE:
3199 case SQ_HSTMP_RING_SIZE:
3200 case SQ_LSTMP_RING_SIZE:
3201 case SQ_PSTMP_RING_SIZE:
3202 case SQ_VSTMP_RING_SIZE:
3203 case SQ_ESGS_RING_ITEMSIZE:
3204 case SQ_ESTMP_RING_ITEMSIZE:
3205 case SQ_GSTMP_RING_ITEMSIZE:
3206 case SQ_GSVS_RING_ITEMSIZE:
3207 case SQ_GS_VERT_ITEMSIZE:
3208 case SQ_GS_VERT_ITEMSIZE_1:
3209 case SQ_GS_VERT_ITEMSIZE_2:
3210 case SQ_GS_VERT_ITEMSIZE_3:
3211 case SQ_GSVS_RING_OFFSET_1:
3212 case SQ_GSVS_RING_OFFSET_2:
3213 case SQ_GSVS_RING_OFFSET_3:
3214 case SQ_HSTMP_RING_ITEMSIZE:
3215 case SQ_LSTMP_RING_ITEMSIZE:
3216 case SQ_PSTMP_RING_ITEMSIZE:
3217 case SQ_VSTMP_RING_ITEMSIZE:
3218 case VGT_TF_RING_SIZE:
3219 case SQ_ESGS_RING_BASE:
3220 case SQ_GSVS_RING_BASE:
3221 case SQ_ESTMP_RING_BASE:
3222 case SQ_GSTMP_RING_BASE:
3223 case SQ_HSTMP_RING_BASE:
3224 case SQ_LSTMP_RING_BASE:
3225 case SQ_PSTMP_RING_BASE:
3226 case SQ_VSTMP_RING_BASE:
3227 case CAYMAN_VGT_OFFCHIP_LDS_BASE:
3228 case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
3231 DRM_ERROR("Invalid register 0x%x in CS\n", reg);
3236 static int evergreen_vm_packet3_check(struct radeon_device *rdev,
3237 u32 *ib, struct radeon_cs_packet *pkt)
3239 u32 idx = pkt->idx + 1;
3240 u32 idx_value = ib[idx];
3241 u32 start_reg, end_reg, reg, i;
3244 switch (pkt->opcode) {
3246 case PACKET3_SET_BASE:
3247 case PACKET3_CLEAR_STATE:
3248 case PACKET3_INDEX_BUFFER_SIZE:
3249 case PACKET3_DISPATCH_DIRECT:
3250 case PACKET3_DISPATCH_INDIRECT:
3251 case PACKET3_MODE_CONTROL:
3252 case PACKET3_SET_PREDICATION:
3253 case PACKET3_COND_EXEC:
3254 case PACKET3_PRED_EXEC:
3255 case PACKET3_DRAW_INDIRECT:
3256 case PACKET3_DRAW_INDEX_INDIRECT:
3257 case PACKET3_INDEX_BASE:
3258 case PACKET3_DRAW_INDEX_2:
3259 case PACKET3_CONTEXT_CONTROL:
3260 case PACKET3_DRAW_INDEX_OFFSET:
3261 case PACKET3_INDEX_TYPE:
3262 case PACKET3_DRAW_INDEX:
3263 case PACKET3_DRAW_INDEX_AUTO:
3264 case PACKET3_DRAW_INDEX_IMMD:
3265 case PACKET3_NUM_INSTANCES:
3266 case PACKET3_DRAW_INDEX_MULTI_AUTO:
3267 case PACKET3_STRMOUT_BUFFER_UPDATE:
3268 case PACKET3_DRAW_INDEX_OFFSET_2:
3269 case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
3270 case PACKET3_MPEG_INDEX:
3271 case PACKET3_WAIT_REG_MEM:
3272 case PACKET3_MEM_WRITE:
3273 case PACKET3_SURFACE_SYNC:
3274 case PACKET3_EVENT_WRITE:
3275 case PACKET3_EVENT_WRITE_EOP:
3276 case PACKET3_EVENT_WRITE_EOS:
3277 case PACKET3_SET_CONTEXT_REG:
3278 case PACKET3_SET_BOOL_CONST:
3279 case PACKET3_SET_LOOP_CONST:
3280 case PACKET3_SET_RESOURCE:
3281 case PACKET3_SET_SAMPLER:
3282 case PACKET3_SET_CTL_CONST:
3283 case PACKET3_SET_RESOURCE_OFFSET:
3284 case PACKET3_SET_CONTEXT_REG_INDIRECT:
3285 case PACKET3_SET_RESOURCE_INDIRECT:
3286 case CAYMAN_PACKET3_DEALLOC_STATE:
3288 case PACKET3_COND_WRITE:
3289 if (idx_value & 0x100) {
3290 reg = ib[idx + 5] * 4;
3291 if (!evergreen_vm_reg_valid(reg))
3295 case PACKET3_COPY_DW:
3296 if (idx_value & 0x2) {
3297 reg = ib[idx + 3] * 4;
3298 if (!evergreen_vm_reg_valid(reg))
3302 case PACKET3_SET_CONFIG_REG:
3303 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
3304 end_reg = 4 * pkt->count + start_reg - 4;
3305 if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
3306 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
3307 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
3308 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
3311 for (i = 0; i < pkt->count; i++) {
3312 reg = start_reg + (4 * i);
3313 if (!evergreen_vm_reg_valid(reg))
3317 case PACKET3_CP_DMA:
3318 command = ib[idx + 4];
3319 info = ib[idx + 1];
3320 if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
3321 (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
3322 ((((info & 0x00300000) >> 20) == 0) &&
3323 (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
3324 ((((info & 0x60000000) >> 29) == 0) &&
3325 (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
3326 /* non mem-to-mem copies require a dw aligned count */
3327 if ((command & 0x1fffff) % 4) {
3328 DRM_ERROR("CP DMA command requires dw count alignment\n");
3332 if (command & PACKET3_CP_DMA_CMD_SAS) {
3333 /* src address space is register */
3334 if (((info & 0x60000000) >> 29) == 0) {
3335 start_reg = idx_value << 2;
3336 if (command & PACKET3_CP_DMA_CMD_SAIC) {
3337 reg = start_reg;
3338 if (!evergreen_vm_reg_valid(reg)) {
3339 DRM_ERROR("CP DMA Bad SRC register\n");
3343 for (i = 0; i < (command & 0x1fffff); i++) {
3344 reg = start_reg + (4 * i);
3345 if (!evergreen_vm_reg_valid(reg)) {
3346 DRM_ERROR("CP DMA Bad SRC register\n");
3353 if (command & PACKET3_CP_DMA_CMD_DAS) {
3354 /* dst address space is register */
3355 if (((info & 0x00300000) >> 20) == 0) {
3356 start_reg = ib[idx + 2];
3357 if (command & PACKET3_CP_DMA_CMD_DAIC) {
3358 reg = start_reg;
3359 if (!evergreen_vm_reg_valid(reg)) {
3360 DRM_ERROR("CP DMA Bad DST register\n");
3364 for (i = 0; i < (command & 0x1fffff); i++) {
3365 reg = start_reg + (4 * i);
3366 if (!evergreen_vm_reg_valid(reg)) {
3367 DRM_ERROR("CP DMA Bad DST register\n");
3381 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
3385 struct radeon_cs_packet pkt;
3389 pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
3390 pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
3393 case RADEON_PACKET_TYPE0:
3394 dev_err(rdev->dev, "Packet0 not allowed!\n");
3397 case RADEON_PACKET_TYPE2:
3400 case RADEON_PACKET_TYPE3:
3401 pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
3402 ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
3403 idx += pkt.count + 2;
3406 dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
3412 } while (idx < ib->length_dw);
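/*
 * Illustrative sketch (not part of the driver): what the
 * RADEON_CP_PACKET_GET_* macros used above pull out of a PM4 header.
 * The packet type lives in bits 31:30, the payload length minus one in
 * bits 29:16 and, for type-3 packets, the opcode in bits 15:8 -- which
 * is why the parser advances idx by pkt.count + 2 (one header dword
 * plus count + 1 payload dwords).
 */
static inline u32 pm4_hdr_type(u32 header)   { return (header >> 30) & 0x3; }
static inline u32 pm4_hdr_count(u32 header)  { return (header >> 16) & 0x3fff; }
static inline u32 pm4_hdr_opcode(u32 header) { return (header >> 8) & 0xff; }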
3418 * evergreen_dma_ib_parse() - parse the DMA IB for VM
3419 * @rdev: radeon_device pointer
3420 * @ib: radeon_ib pointer
3422 * Parses the DMA IB from the VM CS ioctl and
3423 * checks for errors. (Cayman-SI)
3424 * Returns 0 for success and an error on failure.
3426 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
3429 u32 header, cmd, count, sub_cmd;
3432 header = ib->ptr[idx];
3433 cmd = GET_DMA_CMD(header);
3434 count = GET_DMA_COUNT(header);
3435 sub_cmd = GET_DMA_SUB_CMD(header);
3438 case DMA_PACKET_WRITE:
3449 DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
3453 case DMA_PACKET_COPY:
3455 /* Copy L2L, DW aligned */
3463 /* Copy L2L, byte aligned */
3467 /* Copy L2L, partial */
3471 /* Copy L2L, DW aligned, broadcast */
3475 /* Copy L2T Frame to Field */
3479 /* Copy L2T/T2L, partial */
3483 /* Copy L2T broadcast */
3487 /* Copy L2T/T2L (tile units) */
3491 /* Copy T2T, partial (tile units) */
3495 /* Copy L2T broadcast (tile units) */
3500 DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
3504 case DMA_PACKET_CONSTANT_FILL:
3507 case DMA_PACKET_NOP:
3511 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
3514 } while (idx < ib->length_dw);
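/*
 * Illustrative sketch, with assumptions: GET_DMA_CMD, GET_DMA_COUNT and
 * GET_DMA_SUB_CMD above are defined in evergreend.h.  Assuming the
 * usual evergreen DMA header layout -- command in the top nibble, the
 * sub-command in the byte below it, dword count in the low 20 bits --
 * a decoder looks like this; treat the masks as assumptions and the
 * evergreend.h macros as authoritative.
 */
static inline u32 dma_hdr_cmd_sketch(u32 header)
{
	return (header >> 28) & 0xf;
}

static inline u32 dma_hdr_sub_cmd_sketch(u32 header)
{
	return (header >> 20) & 0xff;
}

static inline u32 dma_hdr_count_sketch(u32 header)
{
	return header & 0xfffff;
}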