/*
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 */

#include <generated/utsrelease.h>
#include "i915_drv.h"
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	case VECS: return "vebox";
	case VCS2: return "bsd2";
	default: return "";
	}
}
static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}
static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}
static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
{
	if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
		e->err = -ENOSPC;
		return false;
	}

	if (e->bytes == e->size - 1 || e->err)
		return false;

	return true;
}
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
			      unsigned len)
{
	if (e->pos + len <= e->start) {
		e->pos += len;
		return false;
	}

	/* First vsnprintf needs to fit in its entirety for memmove */
	if (len >= e->size) {
		e->err = -EIO;
		return false;
	}

	return true;
}
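/*
 * Note on the windowing scheme: the error state is re-generated from scratch
 * on every read, so e->start marks the file offset the reader asked for,
 * e->pos tracks how much formatted output has been produced so far, and
 * e->bytes counts what has actually been stored in e->buf.
 * __i915_error_seek() skips whole records that end before the window;
 * __i915_error_advance() below trims the first record that straddles it.
 */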
static void __i915_error_advance(struct drm_i915_error_state_buf *e,
				 unsigned len)
{
	/* If this is the first printf in this window, adjust it so that the
	 * start position matches the start of the buffer.
	 */
	if (e->pos < e->start) {
		const size_t off = e->start - e->pos;

		/* Should not happen but be paranoid */
		if (off > len || e->bytes) {
			e->err = -EIO;
			return;
		}

		memmove(e->buf, e->buf + off, len - off);
		e->bytes = len - off;
		e->pos = e->start;
		return;
	}

	e->bytes += len;
	e->pos += len;
}
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
			       const char *f, va_list args)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		va_list tmp;

		va_copy(tmp, args);
		len = vsnprintf(NULL, 0, f, tmp);
		va_end(tmp);

		if (!__i915_error_seek(e, len))
			return;
	}

	len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;

	__i915_error_advance(e, len);
}
static void i915_error_puts(struct drm_i915_error_state_buf *e,
			    const char *str)
{
	unsigned len;

	if (!__i915_error_ok(e))
		return;

	len = strlen(str);

	/* Seek the first printf which hits the start position */
	if (e->pos < e->start) {
		if (!__i915_error_seek(e, len))
			return;
	}

	if (len >= e->size - e->bytes)
		len = e->size - e->bytes - 1;
	memcpy(e->buf + e->bytes, str, len);

	__i915_error_advance(e, len);
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
#define err_puts(e, s) i915_error_puts(e, s)
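/*
 * Each captured buffer object below is printed as a single row: the 64-bit
 * GTT offset split into two words, the object size, its read/write domains,
 * the per-engine read seqnos in brackets, the write seqno, and then a set of
 * short flag suffixes (pinned, tiling, dirty, purgeable, userptr, last ring,
 * cache level) plus optional name/fence annotations.
 */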
static void print_error_buffers(struct drm_i915_error_state_buf *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	int i;

	err_printf(m, " %s [%d]:\n", name, count);

	while (count--) {
		err_printf(m, " %08x_%08x %8u %02x %02x [ ",
			   upper_32_bits(err->gtt_offset),
			   lower_32_bits(err->gtt_offset),
			   err->size,
			   err->read_domains,
			   err->write_domain);
		for (i = 0; i < I915_NUM_ENGINES; i++)
			err_printf(m, "%02x ", err->rseqno[i]);

		err_printf(m, "] %02x", err->wseqno);
		err_puts(m, pin_flag(err->pinned));
		err_puts(m, tiling_flag(err->tiling));
		err_puts(m, dirty_flag(err->dirty));
		err_puts(m, purgeable_flag(err->purgeable));
		err_puts(m, err->userptr ? " userptr" : "");
		err_puts(m, err->ring != -1 ? " " : "");
		err_puts(m, ring_str(err->ring));
		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

		if (err->name)
			err_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			err_printf(m, " (fence: %d)", err->fence_reg);

		err_puts(m, "\n");
		err++;
	}
}
static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
{
	switch (a) {
	case HANGCHECK_IDLE:
		return "idle";
	case HANGCHECK_WAIT:
		return "wait";
	case HANGCHECK_ACTIVE:
		return "active";
	case HANGCHECK_KICK:
		return "kick";
	case HANGCHECK_HUNG:
		return "hung";
	}

	return "unknown";
}
static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  int ring_idx)
{
	struct drm_i915_error_ring *ring = &error->ring[ring_idx];

	if (!ring->valid)
		return;

	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
	err_printf(m, " START: 0x%08x\n", ring->start);
	err_printf(m, " HEAD: 0x%08x\n", ring->head);
	err_printf(m, " TAIL: 0x%08x\n", ring->tail);
	err_printf(m, " CTL: 0x%08x\n", ring->ctl);
	err_printf(m, " HWS: 0x%08x\n", ring->hws);
	err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
	err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
	err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
	err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
	if (INTEL_INFO(dev)->gen >= 4) {
		err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
		err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
		err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
	}
	err_printf(m, " INSTPM: 0x%08x\n", ring->instpm);
	err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
		   lower_32_bits(ring->faddr));
	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi);
		err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg);
		err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[0],
			   ring->semaphore_seqno[0]);
		err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   ring->semaphore_mboxes[1],
			   ring->semaphore_seqno[1]);
		if (HAS_VEBOX(dev)) {
			err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
				   ring->semaphore_mboxes[2],
				   ring->semaphore_seqno[2]);
		}
	}
	if (USES_PPGTT(dev)) {
		err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);

		if (INTEL_INFO(dev)->gen >= 8) {
			int i;
			for (i = 0; i < 4; i++)
				err_printf(m, " PDP%d: 0x%016llx\n",
					   i, ring->vm_info.pdp[i]);
		} else {
			err_printf(m, " PP_DIR_BASE: 0x%08x\n",
				   ring->vm_info.pp_dir_base);
		}
	}
	err_printf(m, " seqno: 0x%08x\n", ring->seqno);
	err_printf(m, " last_seqno: 0x%08x\n", ring->last_seqno);
	err_printf(m, " waiting: %s\n", yesno(ring->waiting));
	err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
	err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
	err_printf(m, " hangcheck: %s [%d]\n",
		   hangcheck_action_to_str(ring->hangcheck_action),
		   ring->hangcheck_score);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	i915_error_vprintf(e, f, args);
	va_end(args);
}
static void print_error_obj(struct drm_i915_error_state_buf *m,
			    struct drm_i915_error_object *obj)
{
	int page, offset, elt;

	for (page = offset = 0; page < obj->page_count; page++) {
		for (elt = 0; elt < PAGE_SIZE/4; elt++) {
			err_printf(m, "%08x : %08x\n", offset,
				   obj->pages[page][elt]);
			offset += 4;
		}
	}
}
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
			    const struct i915_error_state_file_priv *error_priv)
{
	struct drm_device *dev = error_priv->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_error_state *error = error_priv->error;
	struct drm_i915_error_object *obj;
	int i, j, offset, elt;
	int max_hangcheck_score;

	if (!error) {
		err_printf(m, "no error state collected\n");
		goto out;
	}

	err_printf(m, "%s\n", error->error_msg);
	err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	err_printf(m, "Kernel: " UTS_RELEASE "\n");
	max_hangcheck_score = 0;
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score > max_hangcheck_score)
			max_hangcheck_score = error->ring[i].hangcheck_score;
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
		    error->ring[i].pid != -1) {
			err_printf(m, "Active process (on ring %s): %s [%d]\n",
				   ring_str(i),
				   error->ring[i].comm,
				   error->ring[i].pid);
		}
	}
	err_printf(m, "Reset count: %u\n", error->reset_count);
	err_printf(m, "Suspend count: %u\n", error->suspend_count);
	err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
	err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
	err_printf(m, "PCI Subsystem: %04x:%04x\n",
		   dev->pdev->subsystem_vendor,
		   dev->pdev->subsystem_device);
	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);

	if (HAS_CSR(dev)) {
		struct intel_csr *csr = &dev_priv->csr;

		err_printf(m, "DMC loaded: %s\n",
			   yesno(csr->dmc_payload != NULL));
		err_printf(m, "DMC fw version: %d.%d\n",
			   CSR_VERSION_MAJOR(csr->version),
			   CSR_VERSION_MINOR(csr->version));
	}

	err_printf(m, "EIR: 0x%08x\n", error->eir);
	err_printf(m, "IER: 0x%08x\n", error->ier);
	if (INTEL_INFO(dev)->gen >= 8) {
		for (i = 0; i < 4; i++)
			err_printf(m, "GTIER gt %d: 0x%08x\n", i,
				   error->gtier[i]);
	} else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
		err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
	err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	err_printf(m, "CCID: 0x%08x\n", error->ccid);
	err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
			   error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		err_printf(m, "ERROR: 0x%08x\n", error->error);

		if (INTEL_INFO(dev)->gen >= 8)
			err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
				   error->fault_data1, error->fault_data0);

		err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (IS_GEN7(dev))
		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
		i915_ring_error_state(m, dev, error, i);

	for (i = 0; i < error->vm_count; i++) {
		err_printf(m, "vm[%d]\n", i);

		print_error_buffers(m, "Active",
				    error->active_bo[i],
				    error->active_bo_count[i]);

		print_error_buffers(m, "Pinned",
				    error->pinned_bo[i],
				    error->pinned_bo_count[i]);
	}
	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		obj = error->ring[i].batchbuffer;
		if (obj) {
			err_puts(m, dev_priv->engine[i].name);
			if (error->ring[i].pid != -1)
				err_printf(m, " (submitted by %s [%d])",
					   error->ring[i].comm,
					   error->ring[i].pid);
			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
				   upper_32_bits(obj->gtt_offset),
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		obj = error->ring[i].wa_batchbuffer;
		if (obj) {
			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if (error->ring[i].num_requests) {
			err_printf(m, "%s --- %d requests\n",
				   dev_priv->engine[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if (error->ring[i].num_waiters) {
			err_printf(m, "%s --- %d waiters\n",
				   dev_priv->engine[i].name,
				   error->ring[i].num_waiters);
			for (j = 0; j < error->ring[i].num_waiters; j++) {
				err_printf(m, " seqno 0x%08x for %s [%d]\n",
					   error->ring[i].waiters[j].seqno,
					   error->ring[i].waiters[j].comm,
					   error->ring[i].waiters[j].pid);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}

		if ((obj = error->ring[i].hws_page)) {
			u64 hws_offset = obj->gtt_offset;
			u32 *hws_page = &obj->pages[0][0];

			if (i915.enable_execlists) {
				hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
				hws_page = &obj->pages[LRC_PPHWSP_PN][0];
			}
			err_printf(m, "%s --- HW Status = 0x%08llx\n",
				   dev_priv->engine[i].name, hws_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   hws_page[elt],
					   hws_page[elt+1],
					   hws_page[elt+2],
					   hws_page[elt+3]);
				offset += 16;
			}
		}

		obj = error->ring[i].wa_ctx;
		if (obj) {
			u64 wa_ctx_offset = obj->gtt_offset;
			u32 *wa_ctx_page = &obj->pages[0][0];
			struct intel_engine_cs *engine = &dev_priv->engine[RCS];
			u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
					   engine->wa_ctx.per_ctx.size);

			err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
				   dev_priv->engine[i].name, wa_ctx_offset);
			offset = 0;
			for (elt = 0; elt < wa_ctx_size; elt += 4) {
				err_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   wa_ctx_page[elt + 0],
					   wa_ctx_page[elt + 1],
					   wa_ctx_page[elt + 2],
					   wa_ctx_page[elt + 3]);
				offset += 16;
			}
		}

		if ((obj = error->ring[i].ctx)) {
			err_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->engine[i].name,
				   lower_32_bits(obj->gtt_offset));
			print_error_obj(m, obj);
		}
	}

	if ((obj = error->semaphore_obj)) {
		err_printf(m, "Semaphore page = 0x%08x\n",
			   lower_32_bits(obj->gtt_offset));
		for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
			err_printf(m, "[%04x] %08x %08x %08x %08x\n",
				   elt * 4,
				   obj->pages[0][elt],
				   obj->pages[0][elt+1],
				   obj->pages[0][elt+2],
				   obj->pages[0][elt+3]);
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

out:
	if (m->bytes == 0 && m->err)
		return m->err;

	return 0;
}
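/*
 * The scratch buffer is sized to the caller's read request (or at least one
 * page) and, on failure, falls back to progressively smaller allocations so
 * that at least a truncated error state can still be read out under memory
 * pressure.
 */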
int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos)
{
	memset(ebuf, 0, sizeof(*ebuf));
	ebuf->i915 = i915;

	/* We need to have enough room to store any i915_error_state printf
	 * so that we can move it to start position.
	 */
	ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
	ebuf->buf = kmalloc(ebuf->size, M_DRM, M_WAITOK);

	if (ebuf->buf == NULL) {
		ebuf->size = PAGE_SIZE;
		ebuf->buf = kmalloc(ebuf->size, M_DRM, M_WAITOK);
	}

	if (ebuf->buf == NULL) {
		ebuf->size = 128;
		ebuf->buf = kmalloc(ebuf->size, M_DRM, M_WAITOK);
	}

	if (ebuf->buf == NULL)
		return -ENOMEM;

	ebuf->start = pos;

	return 0;
}
static void i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].wa_batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].hws_page);
		i915_error_object_free(error->ring[i].ctx);
		i915_error_object_free(error->ring[i].wa_ctx);
		kfree(error->ring[i].requests);
		kfree(error->ring[i].waiters);
	}

	i915_error_object_free(error->semaphore_obj);

	for (i = 0; i < error->vm_count; i++)
		kfree(error->active_bo[i]);

	kfree(error->active_bo);
	kfree(error->active_bo_count);
	kfree(error->pinned_bo);
	kfree(error->pinned_bo_count);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
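/*
 * Copy strategy: objects in stolen memory can only be read back through the
 * GGTT aperture, and uncached objects already bound with GLOBAL_BIND are read
 * the same way; everything else is copied via kmap of the backing pages, with
 * clflushes around the memcpy to pick up what the GPU actually saw. Both
 * paths copy under local_irq_save() since capture may run in atomic context.
 */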
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
	bool use_ggtt;
	int i = 0;
	u64 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	num_pages = src->base.size >> PAGE_SHIFT;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), M_DRM, M_NOWAIT);
	if (dst == NULL)
		return NULL;

	if (i915_gem_obj_bound(src, vm))
		dst->gtt_offset = i915_gem_obj_offset(src, vm);
	else
		dst->gtt_offset = -1;

	reloc_offset = dst->gtt_offset;
	if (i915_is_ggtt(vm))
		vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		   vma && (vma->bound & GLOBAL_BIND) &&
		   reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
		use_ggtt = true;

		if (!(vma && vma->bound & GLOBAL_BIND))
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
			goto unwind;
	}

	/* Cannot access snooped pages through the aperture */
	if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
	    !HAS_LLC(dev_priv))
		goto unwind;

	dst->page_count = num_pages;
	while (num_pages--) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, M_DRM, M_NOWAIT);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (use_ggtt) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = io_mapping_map_atomic_wc(ggtt->mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i++] = d;
		reloc_offset += PAGE_SIZE;
	}

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
	i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int i;

	err->size = obj->base.size;
	err->name = obj->base.name;
	for (i = 0; i < I915_NUM_ENGINES; i++)
		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
	err->gtt_offset = vma->node.start;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (i915_gem_obj_is_pinned(obj))
		err->pinned = 1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->userptr = obj->userptr.mm != NULL;
	err->ring = obj->last_write_req ?
			i915_gem_request_get_engine(obj->last_write_req)->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct i915_vma *vma;
	int i = 0;

	list_for_each_entry(vma, head, vm_link) {
		capture_bo(err++, vma);
		if (++i == count)
			break;
	}

	return i;
}
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_buffer * const first = err;
	struct drm_i915_error_buffer * const last = err + count;

	list_for_each_entry(obj, head, global_list) {
		struct i915_vma *vma;

		if (err == last)
			break;

		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (vma->vm == vm && vma->pin_count > 0)
				capture_bo(err++, vma);
	}

	return err - first;
}
/* Generate a semi-unique error code. The code is not meant to have meaning.
 * Its only purpose is to try to prevent false duplicated bug reports by
 * grossly estimating a GPU error state.
 *
 * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
 * the hang if we could strip the GTT offset information from it.
 *
 * It's only a small step better than a random number in its current form.
 */
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
					 struct drm_i915_error_state *error,
					 int *ring_id)
{
	uint32_t error_code = 0;
	int i;

	/* IPEHR would be an ideal way to detect errors, as it's the gross
	 * measure of "the command that hung." However, it carries some very
	 * common synchronization commands which almost always appear when
	 * the hang is strictly a client bug. Use instdone to somewhat
	 * differentiate those cases.
	 */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
			if (ring_id)
				*ring_id = i;

			return error->ring[i].ipehr ^ error->ring[i].instdone;
		}
	}

	return error_code;
}
static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	int i;

	if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ(FENCE_REG(i));
	} else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
	}
}
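/*
 * On gen8+ all semaphore signals are written to one global semaphore page;
 * each (signaller, waiter) engine pair has a fixed slot, so the last mbox
 * value for every other engine can be recovered from the captured page via
 * GEN8_SIGNAL_OFFSET().
 */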
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct drm_i915_error_state *error,
					struct intel_engine_cs *engine,
					struct drm_i915_error_ring *ering)
{
	struct intel_engine_cs *to;
	enum intel_engine_id id;

	if (!i915.semaphores)
		return;

	if (!error->semaphore_obj)
		error->semaphore_obj =
			i915_error_ggtt_object_create(dev_priv,
						      dev_priv->semaphore_obj);

	for_each_engine_id(to, dev_priv, id) {
		int idx;
		u16 signal_offset;
		u32 *tmp;

		if (engine == to)
			continue;

		signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
				/ 4;
		tmp = error->semaphore_obj->pages[0];
		idx = intel_ring_sync_index(engine, to);

		ering->semaphore_mboxes[idx] = tmp[signal_offset];
		ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
	}
}
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *engine,
					struct drm_i915_error_ring *ering)
{
	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
	ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
	ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];

	if (HAS_VEBOX(dev_priv)) {
		ering->semaphore_mboxes[2] =
			I915_READ(RING_SYNC_2(engine->mmio_base));
		ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
	}
}
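/*
 * Waiters are counted under the breadcrumbs lock, the array is then
 * allocated with the lock dropped (the capture path cannot sleep, hence
 * GFP_ATOMIC), and the rbtree is walked a second time to fill it in. If
 * more waiters arrived in between, the copy simply stops at the counted
 * size rather than overflowing.
 */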
static void engine_record_waiters(struct intel_engine_cs *engine,
				  struct drm_i915_error_ring *ering)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_error_waiter *waiter;
	struct rb_node *rb;
	int count;

	ering->num_waiters = 0;
	ering->waiters = NULL;

	lockmgr(&b->lock, LK_EXCLUSIVE);
	count = 0;
	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
		count++;
	lockmgr(&b->lock, LK_RELEASE);

	waiter = NULL;
	if (count)
		waiter = kmalloc_array(count,
				       sizeof(struct drm_i915_error_waiter),
				       GFP_ATOMIC);
	if (!waiter)
		return;

	ering->waiters = waiter;

	lockmgr(&b->lock, LK_EXCLUSIVE);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		strcpy(waiter->comm, w->tsk->comm);
		waiter->pid = w->tsk->pid;
		waiter->seqno = w->seqno;
		waiter++;

		if (++ering->num_waiters == count)
			break;
	}
	lockmgr(&b->lock, LK_RELEASE);
}
static void i915_record_ring_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   struct intel_engine_cs *engine,
				   struct drm_i915_error_ring *ering)
{
	if (INTEL_GEN(dev_priv) >= 6) {
		ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
		ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (INTEL_GEN(dev_priv) >= 8)
			gen8_record_semaphore_state(dev_priv, error, engine,
						    ering);
		else
			gen6_record_semaphore_state(dev_priv, engine, ering);
	}

	if (INTEL_GEN(dev_priv) >= 4) {
		ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
		ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
		ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
		ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
		ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
		ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
		if (INTEL_GEN(dev_priv) >= 8) {
			ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
			ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
		}
		ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
	} else {
		ering->faddr = I915_READ(DMA_FADD_I8XX);
		ering->ipeir = I915_READ(IPEIR);
		ering->ipehr = I915_READ(IPEHR);
		ering->instdone = I915_READ(GEN2_INSTDONE);
	}

	ering->waiting = intel_engine_has_waiter(engine);
	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ering->acthd = intel_ring_get_active_head(engine);
	ering->seqno = intel_engine_get_seqno(engine);
	ering->last_seqno = engine->last_submitted_seqno;
	ering->start = I915_READ_START(engine);
	ering->head = I915_READ_HEAD(engine);
	ering->tail = I915_READ_TAIL(engine);
	ering->ctl = I915_READ_CTL(engine);

	if (I915_NEED_GFX_HWS(dev_priv)) {
		i915_reg_t mmio;

		if (IS_GEN7(dev_priv)) {
			switch (engine->id) {
			default:
			case RCS:
				mmio = RENDER_HWS_PGA_GEN7;
				break;
			case BCS:
				mmio = BLT_HWS_PGA_GEN7;
				break;
			case VCS:
				mmio = BSD_HWS_PGA_GEN7;
				break;
			case VECS:
				mmio = VEBOX_HWS_PGA_GEN7;
				break;
			}
		} else if (IS_GEN6(engine->i915)) {
			mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
		} else {
			/* XXX: gen8 returns to sanity */
			mmio = RING_HWS_PGA(engine->mmio_base);
		}

		ering->hws = I915_READ(mmio);
	}

	ering->hangcheck_score = engine->hangcheck.score;
	ering->hangcheck_action = engine->hangcheck.action;

	if (USES_PPGTT(dev_priv)) {
		int i;

		ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));

		if (IS_GEN6(dev_priv))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE_READ(engine));
		else if (IS_GEN7(dev_priv))
			ering->vm_info.pp_dir_base =
				I915_READ(RING_PP_DIR_BASE(engine));
		else if (INTEL_GEN(dev_priv) >= 8)
			for (i = 0; i < 4; i++) {
				ering->vm_info.pdp[i] =
					I915_READ(GEN8_RING_PDP_UDW(engine, i));
				ering->vm_info.pdp[i] <<= 32;
				ering->vm_info.pdp[i] |=
					I915_READ(GEN8_RING_PDP_LDW(engine, i));
			}
	}
}
static void i915_gem_record_active_context(struct intel_engine_cs *engine,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (engine->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!i915_gem_obj_ggtt_bound(obj))
			continue;

		if ((error->ccid & LINUX_PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
			ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
			break;
		}
	}
}
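/*
 * Per-engine capture: snapshot the ring registers and current waiters, then,
 * if a request was executing, copy the batch (plus the w/a batch on
 * HAS_BROKEN_CS_TLB platforms), the ringbuffer, status page, w/a context and
 * HW context into anonymous pages, record the submitting task, and finally
 * dump the outstanding request list.
 */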
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
				  struct drm_i915_error_state *error)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_engine_cs *engine = &dev_priv->engine[i];

		error->ring[i].pid = -1;

		if (!intel_engine_initialized(engine))
			continue;

		error->ring[i].valid = true;

		i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
		engine_record_waiters(engine, &error->ring[i]);

		request = i915_gem_find_active_request(engine);
		if (request) {
			struct i915_address_space *vm;
			struct intel_ringbuffer *rb;

			vm = request->ctx->ppgtt ?
				&request->ctx->ppgtt->base : &ggtt->base;

			/* We need to copy these to an anonymous buffer
			 * as the simplest method to avoid being overwritten
			 * by userspace.
			 */
			error->ring[i].batchbuffer =
				i915_error_object_create(dev_priv,
							 request->batch_obj,
							 vm);

			if (HAS_BROKEN_CS_TLB(dev_priv))
				error->ring[i].wa_batchbuffer =
					i915_error_ggtt_object_create(dev_priv,
							 engine->scratch.obj);

			if (request->pid) {
				struct task_struct *task;

				rcu_read_lock();
				task = pid_task(request->pid, PIDTYPE_PID);
				if (task) {
					strcpy(error->ring[i].comm, task->comm);
					error->ring[i].pid = task->pid;
				}
				rcu_read_unlock();
			}

			error->simulated |=
				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;

			rb = request->ringbuf;
			error->ring[i].cpu_ring_head = rb->head;
			error->ring[i].cpu_ring_tail = rb->tail;
			error->ring[i].ringbuffer =
				i915_error_ggtt_object_create(dev_priv,
							      rb->obj);
		}

		error->ring[i].hws_page =
			i915_error_ggtt_object_create(dev_priv,
						      engine->status_page.obj);

		if (engine->wa_ctx.obj) {
			error->ring[i].wa_ctx =
				i915_error_ggtt_object_create(dev_priv,
							      engine->wa_ctx.obj);
		}

		i915_gem_record_active_context(engine, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &engine->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kcalloc(count, sizeof(*error->ring[i].requests),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &engine->request_list, list) {
			struct drm_i915_error_request *erq;

			if (count >= error->ring[i].num_requests) {
				/*
				 * If the ring request list was changed in
				 * between the point where the error request
				 * list was created and dimensioned and this
				 * point then just exit early to avoid crashes.
				 *
				 * We don't need to communicate that the
				 * request list changed state during error
				 * state capture and that the error state is
				 * slightly incorrect as a consequence since we
				 * are typically only interested in the request
				 * list state at the point of error state
				 * capture, not in any changes happening during
				 * the capture.
				 */
				break;
			}

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->fence.seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->postfix;
		}
	}
}
/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
 * VM.
 */
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
				struct drm_i915_error_state *error,
				struct i915_address_space *vm,
				const int ndx)
{
	struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i;

	i = 0;
	list_for_each_entry(vma, &vm->active_list, vm_link)
		i++;
	error->active_bo_count[ndx] = i;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, obj_link)
			if (vma->vm == vm && vma->pin_count > 0)
				i++;
	}
	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];

	if (i) {
		active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
		if (active_bo)
			pinned_bo = active_bo + error->active_bo_count[ndx];
	}

	if (active_bo)
		error->active_bo_count[ndx] =
			capture_active_bo(active_bo,
					  error->active_bo_count[ndx],
					  &vm->active_list);

	if (pinned_bo)
		error->pinned_bo_count[ndx] =
			capture_pinned_bo(pinned_bo,
					  error->pinned_bo_count[ndx],
					  &dev_priv->mm.bound_list, vm);
	error->active_bo[ndx] = active_bo;
	error->pinned_bo[ndx] = pinned_bo;
}
static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
				     struct drm_i915_error_state *error)
{
	struct i915_address_space *vm;
	int cnt = 0, i = 0;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		cnt++;

	error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
	error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
	error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
					 GFP_ATOMIC);
	error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
					 GFP_ATOMIC);

	if (error->active_bo == NULL ||
	    error->pinned_bo == NULL ||
	    error->active_bo_count == NULL ||
	    error->pinned_bo_count == NULL) {
		kfree(error->active_bo);
		kfree(error->active_bo_count);
		kfree(error->pinned_bo);
		kfree(error->pinned_bo_count);

		error->active_bo = NULL;
		error->active_bo_count = NULL;
		error->pinned_bo = NULL;
		error->pinned_bo_count = NULL;
	} else {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link)
			i915_gem_capture_vm(dev_priv, error, vm, i++);

		error->vm_count = cnt;
	}
}
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	/* General organization
	 * 1. Registers specific to a single generation
	 * 2. Registers which belong to multiple generations
	 * 3. Feature specific registers.
	 * 4. Everything else
	 * Please try to follow the order.
	 */

	/* 1: Registers specific to a single generation */
	if (IS_VALLEYVIEW(dev)) {
		error->gtier[0] = I915_READ(GTIER);
		error->ier = I915_READ(VLV_IER);
		error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
	}

	if (IS_GEN7(dev))
		error->err_int = I915_READ(GEN7_ERR_INT);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
	}

	if (IS_GEN6(dev)) {
		error->forcewake = I915_READ_FW(FORCEWAKE);
		error->gab_ctl = I915_READ(GAB_CTL);
		error->gfx_mode = I915_READ(GFX_MODE);
	}

	/* 2: Registers which belong to multiple generations */
	if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ_FW(FORCEWAKE_MT);

	if (INTEL_INFO(dev)->gen >= 6) {
		error->derrmr = I915_READ(DERRMR);
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	/* 3: Feature specific registers */
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		error->gam_ecochk = I915_READ(GAM_ECOCHK);
		error->gac_eco = I915_READ(GAC_ECO_BITS);
	}

	/* 4: Everything else */
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (INTEL_INFO(dev)->gen >= 8) {
		error->ier = I915_READ(GEN8_DE_MISC_IER);
		for (i = 0; i < 4; i++)
			error->gtier[i] = I915_READ(GEN8_GT_IER(i));
	} else if (HAS_PCH_SPLIT(dev)) {
		error->ier = I915_READ(DEIER);
		error->gtier[0] = I915_READ(GTIER);
	} else if (IS_GEN2(dev)) {
		error->ier = I915_READ16(IER);
	} else if (!IS_VALLEYVIEW(dev)) {
		error->ier = I915_READ(IER);
	}
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);

	i915_get_extra_instdone(dev_priv, error->extra_instdone);
}
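/*
 * Builds the one-line summary that is printed via DRM_INFO and stored at the
 * head of the error state, e.g. (values illustrative only):
 *   GPU HANG: ecode 9:0:0x85dffffb, in Xorg [900], reason: ..., action: reset
 */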
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error,
				   u32 engine_mask,
				   const char *error_msg)
{
	u32 ecode;
	int ring_id = -1, len;

	ecode = i915_error_generate_code(dev_priv, error, &ring_id);

	len = scnprintf(error->error_msg, sizeof(error->error_msg),
			"GPU HANG: ecode %d:%d:0x%08x",
			INTEL_GEN(dev_priv), ring_id, ecode);

	if (ring_id != -1 && error->ring[ring_id].pid != -1)
		len += scnprintf(error->error_msg + len,
				 sizeof(error->error_msg) - len,
				 ", in %s [%d]",
				 error->ring[ring_id].comm,
				 error->ring[ring_id].pid);

	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
		  ", reason: %s, action: %s",
		  error_msg,
		  engine_mask ? "reset" : "continue");
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
				   struct drm_i915_error_state *error)
{
	error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
	error->iommu = intel_iommu_gfx_mapped;
#endif
	error->reset_count = i915_reset_count(&dev_priv->gpu_error);
	error->suspend_count = dev_priv->suspend_count;
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev_priv: i915 device private
 * @engine_mask: mask of the engines flagged as hung (0 means no reset)
 * @error_msg: short reason string for the capture
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg)
{
	static bool warned;
	struct drm_i915_error_state *error;
	unsigned long flags;

	if (READ_ONCE(dev_priv->gpu_error.first_error))
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	kref_init(&error->ref);

	i915_capture_gen_state(dev_priv, error);
	i915_capture_reg_state(dev_priv, error);
	i915_gem_capture_buffers(dev_priv, error);
	i915_gem_record_fences(dev_priv, error);
	i915_gem_record_rings(dev_priv, error);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev_priv);
	error->display = intel_display_capture_error_state(dev_priv);

	i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
	DRM_INFO("%s\n", error->error_msg);

	if (!error->simulated) {
		spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
		if (!dev_priv->gpu_error.first_error) {
			dev_priv->gpu_error.first_error = error;
			error = NULL;
		}
		spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	}

	if (error) {
		i915_error_state_free(&error->ref);
		return;
	}

	if (!warned) {
		DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
		DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
		DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
		DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
		DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
			 dev_priv->drm.primary->index);
		warned = true;
	}
}
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irq(&dev_priv->gpu_error.lock);
}
void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
{
	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_error_state *error;

	spin_lock_irq(&dev_priv->gpu_error.lock);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irq(&dev_priv->gpu_error.lock);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
/* NB: please notice the memset */
void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
			     uint32_t *instdone)
{
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
		instdone[0] = I915_READ(GEN2_INSTDONE);
	else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN4_INSTDONE1);
	} else if (INTEL_GEN(dev_priv) >= 7) {
		instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
	}
}