/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"
#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
        struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};
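
/**
 * radeon_cs_buckets_init() - initialize all priority buckets as empty lists
 * @b:	bucket structure to initialize
 */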
static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
        unsigned i;

        for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
                INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
                                  struct list_head *item, unsigned priority)
{
        /* Since buffers which appear sooner in the relocation list are
         * likely to be used more often than buffers which appear later
         * in the list, the sort mustn't change the ordering of buffers
         * with the same priority, i.e. it must be stable.
         */
        list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
                                       struct list_head *out_list)
{
        unsigned i;

        /* Connect the sorted buckets in the output list. */
        for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
                list_splice(&b->bucket[i], out_list);
        }
}
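
/**
 * radeon_cs_parser_relocs() - build the list of BOs referenced by the CS
 * @p:	parser structure holding parsing context
 *
 * Walks the relocation chunk, looks up each GEM handle, assigns the
 * preferred/allowed domains and adds every buffer to the validation
 * list, sorted by priority, before validating the whole list.
 */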
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
        struct radeon_cs_chunk *chunk;
        struct radeon_cs_buckets buckets;
        unsigned i, j;
        bool duplicate;

        if (p->chunk_relocs_idx == -1) {
                return 0;
        }
        chunk = &p->chunks[p->chunk_relocs_idx];
        p->dma_reloc_idx = 0;
        /* FIXME: we assume that each relocs use 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
        p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
        if (p->relocs_ptr == NULL) {
                return -ENOMEM;
        }
        p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
        if (p->relocs == NULL) {
                return -ENOMEM;
        }

        radeon_cs_buckets_init(&buckets);

        for (i = 0; i < p->nrelocs; i++) {
                struct drm_radeon_cs_reloc *r;
                unsigned priority;

                duplicate = false;
                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
                for (j = 0; j < i; j++) {
                        if (r->handle == p->relocs[j].handle) {
                                p->relocs_ptr[i] = &p->relocs[j];
                                duplicate = true;
                                break;
                        }
                }
                if (duplicate) {
                        p->relocs[i].handle = 0;
                        continue;
                }

                p->relocs[i].gobj = drm_gem_object_lookup(p->filp, r->handle);
                if (p->relocs[i].gobj == NULL) {
                        DRM_ERROR("gem object lookup failed 0x%x\n",
                                  r->handle);
                        return -ENOENT;
                }
                p->relocs_ptr[i] = &p->relocs[i];
                p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);

                /* The userspace buffer priorities are from 0 to 15. A higher
                 * number means the buffer is more important.
                 * Also, the buffers used for write have a higher priority than
                 * the buffers used for read only, which doubles the range
                 * to 0 to 31. 32 is reserved for the kernel driver.
                 */
                priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
                           + !!r->write_domain;

                /* the first reloc of an UVD job is the msg and that must be in
                   VRAM, also put everything into VRAM on AGP cards and older
                   IGP chips to avoid image corruptions */
                if (p->ring == R600_RING_TYPE_UVD_INDEX &&
                    (i == 0 || (p->rdev->flags & RADEON_IS_AGP) ||
                     p->rdev->family == CHIP_RS780 ||
                     p->rdev->family == CHIP_RS880)) {

                        /* TODO: is this still needed for NI+ ? */
                        p->relocs[i].prefered_domains =
                                RADEON_GEM_DOMAIN_VRAM;

                        p->relocs[i].allowed_domains =
                                RADEON_GEM_DOMAIN_VRAM;

                        /* prioritize this over any other relocation */
                        priority = RADEON_CS_MAX_PRIORITY;
                } else {
                        uint32_t domain = r->write_domain ?
                                r->write_domain : r->read_domains;

                        if (domain & RADEON_GEM_DOMAIN_CPU) {
                                DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
                                          "for command submission\n");
                                return -EINVAL;
                        }

                        p->relocs[i].prefered_domains = domain;
                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain |= RADEON_GEM_DOMAIN_GTT;
                        p->relocs[i].allowed_domains = domain;
                }

                p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
                p->relocs[i].handle = r->handle;

                radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
                                      priority);
        }

        radeon_cs_buckets_get_list(&buckets, &p->validated);

        if (p->cs_flags & RADEON_CS_USE_VM)
                p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
                                              &p->validated);

        return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
}
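
/**
 * radeon_cs_get_ring() - map the userspace ring id onto a hardware ring index
 * @p:		parser structure holding parsing context
 * @ring:	ring identifier from the CS flags chunk
 * @priority:	requested submission priority
 *
 * Picks the hardware ring for the submission; for compute and DMA the
 * priority selects between the available rings on chips that have more
 * than one.
 */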
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
        p->priority = priority;

        switch (ring) {
        default:
                DRM_ERROR("unknown ring id: %d\n", ring);
                return -EINVAL;
        case RADEON_CS_RING_GFX:
                p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
        case RADEON_CS_RING_COMPUTE:
                if (p->rdev->family >= CHIP_TAHITI) {
                        if (p->priority > 0)
                                p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
                        else
                                p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
                } else
                        p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
        case RADEON_CS_RING_DMA:
                if (p->rdev->family >= CHIP_CAYMAN) {
                        if (p->priority > 0)
                                p->ring = R600_RING_TYPE_DMA_INDEX;
                        else
                                p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
                } else if (p->rdev->family >= CHIP_RV770) {
                        p->ring = R600_RING_TYPE_DMA_INDEX;
                } else {
                        return -EINVAL;
                }
                break;
        case RADEON_CS_RING_UVD:
                p->ring = R600_RING_TYPE_UVD_INDEX;
                break;
        case RADEON_CS_RING_VCE:
                /* TODO: only use the low priority ring for now */
                p->ring = TN_RING_TYPE_VCE1_INDEX;
                break;
        }
        return 0;
}
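
/**
 * radeon_cs_sync_rings() - synchronize the IB with other rings
 * @p:	parser structure holding parsing context
 *
 * Makes the IB wait on the last fence of every relocated BO, so work
 * previously submitted on other rings completes before this submission
 * touches the buffers.
 */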
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
        int i;

        for (i = 0; i < p->nrelocs; i++) {
                if (!p->relocs[i].robj)
                        continue;
                radeon_semaphore_sync_to(p->ib.semaphore,
                                         p->relocs[i].robj->tbo.sync_obj);
        }
}
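
/**
 * radeon_cs_parser_init() - initialize the parser from a CS ioctl request
 * @p:		parser structure holding parsing context
 * @data:	pointer to the drm_radeon_cs ioctl argument
 *
 * Copies the chunk array in from userspace, classifies the chunks and
 * pulls the CS flags (ring and priority) out of the flags chunk.
 */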
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
        struct drm_radeon_cs *cs = data;
        uint64_t *chunk_array_ptr;
        unsigned size, i;
        u32 ring = RADEON_CS_RING_GFX;
        s32 priority = 0;

        if (!cs->num_chunks) {
                return 0;
        }

        /* get chunks */
        INIT_LIST_HEAD(&p->validated);
        p->idx = 0;
        p->ib.sa_bo = NULL;
        p->ib.semaphore = NULL;
        p->const_ib.sa_bo = NULL;
        p->const_ib.semaphore = NULL;
        p->chunk_ib_idx = -1;
        p->chunk_relocs_idx = -1;
        p->chunk_flags_idx = -1;
        p->chunk_const_ib_idx = -1;
        p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (p->chunks_array == NULL) {
                return -ENOMEM;
        }
        chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
        if (copy_from_user(p->chunks_array, chunk_array_ptr,
                           sizeof(uint64_t)*cs->num_chunks)) {
                return -EFAULT;
        }
        p->cs_flags = 0;
        p->nchunks = cs->num_chunks;
        p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
        if (p->chunks == NULL) {
                return -ENOMEM;
        }
        for (i = 0; i < p->nchunks; i++) {
                struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
                struct drm_radeon_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
                if (copy_from_user(&user_chunk, chunk_ptr,
                                   sizeof(struct drm_radeon_cs_chunk))) {
                        return -EFAULT;
                }
                p->chunks[i].length_dw = user_chunk.length_dw;
                p->chunks[i].chunk_id = user_chunk.chunk_id;
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
                        p->chunk_relocs_idx = i;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
                        p->chunk_ib_idx = i;
                        /* zero length IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
                        p->chunk_const_ib_idx = i;
                        /* zero length CONST IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->chunk_flags_idx = i;
                        /* zero length flags aren't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }

                size = p->chunks[i].length_dw;
                cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
                p->chunks[i].user_ptr = cdata;
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
                        continue;

                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
                        if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
                                continue;
                }

                p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
                size *= sizeof(uint32_t);
                if (p->chunks[i].kdata == NULL) {
                        return -ENOMEM;
                }
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        return -EFAULT;
                }
                if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->cs_flags = p->chunks[i].kdata[0];
                        if (p->chunks[i].length_dw > 1)
                                ring = p->chunks[i].kdata[1];
                        if (p->chunks[i].length_dw > 2)
                                priority = (s32)p->chunks[i].kdata[2];
                }
        }

        /* these are KMS only */
        if (p->rdev) {
                if ((p->cs_flags & RADEON_CS_USE_VM) &&
                    !p->rdev->vm_manager.enabled) {
                        DRM_ERROR("VM not active on asic!\n");
                        return -EINVAL;
                }

                if (radeon_cs_get_ring(p, ring, priority))
                        return -EINVAL;

                /* we only support VM on some SI+ rings */
                if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
                        if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
                                DRM_ERROR("Ring %d requires VM!\n", p->ring);
                                return -EINVAL;
                        }
                } else {
                        if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
                                DRM_ERROR("VM not supported on ring %d!\n",
                                          p->ring);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
static int cmp_size_smaller_first(void *priv, struct list_head *a,
                                  struct list_head *b)
{
        struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
        struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);

        /* Sort A before B if A is smaller. */
        return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
        unsigned i;

        if (!error) {
                /* Sort the buffer list from the smallest to largest buffer,
                 * which affects the order of buffers in the LRU list.
                 * This assures that the smallest buffers are added first
                 * to the LRU list, so they are likely to be later evicted
                 * first, instead of large buffers whose eviction is more
                 * expensive.
                 *
                 * This slightly lowers the number of bytes moved by TTM
                 * per frame under memory pressure.
                 */
                list_sort(NULL, &parser->validated, cmp_size_smaller_first);

                ttm_eu_fence_buffer_objects(&parser->ticket,
                                            &parser->validated,
                                            parser->ib.fence);
        } else if (backoff) {
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);
        }

        if (parser->relocs != NULL) {
                for (i = 0; i < parser->nrelocs; i++) {
                        if (parser->relocs[i].gobj)
                                drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
                }
        }
        kfree(parser->track);
        kfree(parser->relocs);
        kfree(parser->relocs_ptr);
        drm_free_large(parser->vm_bos);
        for (i = 0; i < parser->nchunks; i++)
                drm_free_large(parser->chunks[i].kdata);
        kfree(parser->chunks);
        kfree(parser->chunks_array);
        radeon_ib_free(parser->rdev, &parser->ib);
        radeon_ib_free(parser->rdev, &parser->const_ib);
}
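
/**
 * radeon_cs_ib_chunk() - parse and schedule a non-VM IB
 * @rdev:	radeon device
 * @parser:	parser structure holding parsing context
 *
 * Runs the legacy per-ring CS checker on the IB and, when it passes,
 * schedules the IB on the selected ring.
 */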
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
                              struct radeon_cs_parser *parser)
{
        int r;

        if (parser->chunk_ib_idx == -1)
                return 0;

        if (parser->cs_flags & RADEON_CS_USE_VM)
                return 0;

        r = radeon_cs_parse(rdev, parser->ring, parser);
        if (r || parser->parser_error) {
                DRM_ERROR("Invalid command stream !\n");
                return r;
        }

        if (parser->ring == R600_RING_TYPE_UVD_INDEX)
                radeon_uvd_note_usage(rdev);
        else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
                 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
                radeon_vce_note_usage(rdev);

        radeon_cs_sync_rings(parser);
        r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        if (r) {
                DRM_ERROR("Failed to schedule IB !\n");
        }
        return r;
}
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                                   struct radeon_vm *vm)
{
        struct radeon_device *rdev = p->rdev;
        struct radeon_bo_va *bo_va;
        int i, r;

        r = radeon_vm_update_page_directory(rdev, vm);
        if (r)
                return r;

        r = radeon_vm_clear_freed(rdev, vm);
        if (r)
                return r;

        if (vm->ib_bo_va == NULL) {
                DRM_ERROR("Tmp BO not in VM!\n");
                return -EINVAL;
        }

        r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
                                &rdev->ring_tmp_bo.bo->tbo.mem);
        if (r)
                return r;

        for (i = 0; i < p->nrelocs; i++) {
                struct radeon_bo *bo;

                /* ignore duplicates */
                if (p->relocs_ptr[i] != &p->relocs[i])
                        continue;

                bo = p->relocs[i].robj;
                bo_va = radeon_vm_bo_find(vm, bo);
                if (bo_va == NULL) {
                        dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
                        return -EINVAL;
                }

                r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;
        }

        return radeon_vm_clear_invalids(rdev, vm);
}
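
/**
 * radeon_cs_ib_vm_chunk() - parse and schedule an IB using a VM
 * @rdev:	radeon device
 * @parser:	parser structure holding parsing context
 *
 * Runs the per-ring IB checker, updates the page tables under the VM
 * mutex and schedules the IB (together with the const IB on SI+).
 */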
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
                                 struct radeon_cs_parser *parser)
{
        struct radeon_fpriv *fpriv = parser->filp->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        int r;

        if (parser->chunk_ib_idx == -1)
                return 0;
        if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
                return 0;

        if (parser->const_ib.length_dw) {
                r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
                if (r)
                        return r;
        }

        r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
        if (r)
                return r;

        if (parser->ring == R600_RING_TYPE_UVD_INDEX)
                radeon_uvd_note_usage(rdev);

        lockmgr(&vm->mutex, LK_EXCLUSIVE);
        r = radeon_bo_vm_update_pte(parser, vm);
        if (r)
                goto out;
        radeon_cs_sync_rings(parser);
        radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);

        if ((rdev->family >= CHIP_TAHITI) &&
            (parser->chunk_const_ib_idx != -1)) {
                r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
        } else {
                r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        }

out:
        lockmgr(&vm->mutex, LK_RELEASE);
        return r;
}
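
/**
 * radeon_cs_handle_lockup() - reset the GPU if the CS detected a lockup
 * @rdev:	radeon device
 * @r:		error code returned by the CS path
 *
 * A -EDEADLK return indicates a lockup; try a GPU reset and ask
 * userspace to retry the submission by returning -EAGAIN on success.
 */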
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}
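
/**
 * radeon_cs_ib_fill() - allocate the IBs and copy in the command stream
 * @rdev:	radeon device
 * @parser:	parser structure holding parsing context
 *
 * Allocates the IB (and the const IB on SI+ VM submissions), checks the
 * size limits and copies the command words in from the chunk data.
 */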
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_vm *vm = NULL;
        int r;

        if (parser->chunk_ib_idx == -1)
                return 0;

        if (parser->cs_flags & RADEON_CS_USE_VM) {
                struct radeon_fpriv *fpriv = parser->filp->driver_priv;
                vm = &fpriv->vm;

                if ((rdev->family >= CHIP_TAHITI) &&
                    (parser->chunk_const_ib_idx != -1)) {
                        ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
                        if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                                DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
                                return -EINVAL;
                        }
                        r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
                                          vm, ib_chunk->length_dw * 4);
                        if (r) {
                                DRM_ERROR("Failed to get const ib !\n");
                                return r;
                        }
                        parser->const_ib.is_const_ib = true;
                        parser->const_ib.length_dw = ib_chunk->length_dw;
                        if (copy_from_user(parser->const_ib.ptr,
                                           ib_chunk->user_ptr,
                                           ib_chunk->length_dw * 4))
                                return -EFAULT;
                }

                ib_chunk = &parser->chunks[parser->chunk_ib_idx];
                if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                        DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
                        return -EINVAL;
                }
        }
        ib_chunk = &parser->chunks[parser->chunk_ib_idx];

        r = radeon_ib_get(rdev, parser->ring, &parser->ib,
                          vm, ib_chunk->length_dw * 4);
        if (r) {
                DRM_ERROR("Failed to get ib !\n");
                return r;
        }
        parser->ib.length_dw = ib_chunk->length_dw;
        if (ib_chunk->kdata)
                memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
        else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
                return -EFAULT;

        return 0;
}
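
/**
 * radeon_cs_ioctl() - command submission ioctl entry point
 * @dev:	drm device
 * @data:	drm_radeon_cs ioctl argument
 * @filp:	drm file of the submitting client
 *
 * Initializes the parser, validates the relocated buffers, then hands
 * the IB to the non-VM or VM submission path and cleans up, handling
 * GPU lockups on the way out.
 */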
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_cs_parser parser;
        int r;

        lockmgr(&rdev->exclusive_lock, LK_EXCLUSIVE);
        if (!rdev->accel_working) {
                lockmgr(&rdev->exclusive_lock, LK_RELEASE);
                return -EBUSY;
        }
        if (rdev->in_reset) {
                lockmgr(&rdev->exclusive_lock, LK_RELEASE);
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
                return r;
        }
        /* initialize parser */
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
        parser.rdev = rdev;
        parser.dev = rdev->dev;
        parser.family = rdev->family;
        r = radeon_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
                radeon_cs_parser_fini(&parser, r, false);
                lockmgr(&rdev->exclusive_lock, LK_RELEASE);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }

        r = radeon_cs_ib_fill(rdev, &parser);
        if (!r) {
                r = radeon_cs_parser_relocs(&parser);
                if (r && r != -ERESTARTSYS)
                        DRM_ERROR("Failed to parse relocation %d!\n", r);
        }

        if (r) {
                radeon_cs_parser_fini(&parser, r, false);
                lockmgr(&rdev->exclusive_lock, LK_RELEASE);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }

        trace_radeon_cs(&parser);

        r = radeon_cs_ib_chunk(rdev, &parser);
        if (r)
                goto out;
        r = radeon_cs_ib_vm_chunk(rdev, &parser);
        if (r)
                goto out;
out:
        radeon_cs_parser_fini(&parser, r, true);
        lockmgr(&rdev->exclusive_lock, LK_RELEASE);
        r = radeon_cs_handle_lockup(rdev, r);
        return r;
}
/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet start in the ib
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size or if the packet
 * is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt,
                           unsigned idx)
{
        struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
        struct radeon_device *rdev = p->rdev;
        uint32_t header;

        if (idx >= ib_chunk->length_dw) {
                DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                          idx, ib_chunk->length_dw);
                return -EINVAL;
        }
        header = radeon_get_ib_value(p, idx);
        pkt->idx = idx;
        pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
        pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
        pkt->one_reg_wr = 0;
        switch (pkt->type) {
        case RADEON_PACKET_TYPE0:
                if (rdev->family < CHIP_R600) {
                        pkt->reg = R100_CP_PACKET0_GET_REG(header);
                        pkt->one_reg_wr =
                                RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
                } else
                        pkt->reg = R600_CP_PACKET0_GET_REG(header);
                break;
        case RADEON_PACKET_TYPE3:
                pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
                break;
        case RADEON_PACKET_TYPE2:
                pkt->count = -1;
                break;
        default:
                DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
                return -EINVAL;
        }
        if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                          pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
                return -EINVAL;
        }
        return 0;
}
/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet p3reloc;
        int r;

        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return false;
        if (p3reloc.type != RADEON_PACKET_TYPE3)
                return false;
        if (p3reloc.opcode != RADEON_PACKET3_NOP)
                return false;
        return true;
}
/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt)
{
        volatile uint32_t *ib;
        unsigned i;
        unsigned idx;

        ib = p->ib.ptr;
        idx = pkt->idx;
        for (i = 0; i <= (pkt->count + 1); i++, idx++)
                DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}
/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resulting reloc information
 * @nomm:	non-zero on the legacy path without a memory manager
 *
 * Check if the next packet is a relocation packet3, do bo validation and
 * compute GPU offset using the provided start.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
                                struct radeon_cs_reloc **cs_reloc,
                                int nomm)
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
        int r;

        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return r;
        p->idx += p3reloc.count + 2;
        if (p3reloc.type != RADEON_PACKET_TYPE3 ||
            p3reloc.opcode != RADEON_PACKET3_NOP) {
                DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                          p3reloc.idx);
                radeon_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                radeon_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        /* FIXME: we assume reloc size is 4 dwords */
        if (nomm) {
                *cs_reloc = p->relocs;
                (*cs_reloc)->gpu_offset =
                        (u64)relocs_chunk->kdata[idx + 3] << 32;
                (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
        } else
                *cs_reloc = p->relocs_ptr[(idx / 4)];
        return 0;
}