/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <generated/utsrelease.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};
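/*
 * All of the files below are exposed through debugfs. Assuming debugfs is
 * mounted at the usual /sys/kernel/debug, they show up under
 * /sys/kernel/debug/dri/<minor>/, e.g.:
 *
 *	cat /sys/kernel/debug/dri/0/i915_capabilities
 */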
static const char *yesno(int v)
{
	return v ? "yes" : "no";
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define DEV_INFO_SEP ;
	DEV_INFO_FLAGS;
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP

	return 0;
}
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}
static const char *cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return "";
	}
}
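/*
 * Print a one-line summary of a GEM object: kernel pointer, pin/tiling
 * flags, size, read/write domains and last read/write/fence seqnos,
 * followed by optional annotations (flink name, pin count, fence register,
 * GTT/stolen placement, mappability and the last ring used).
 */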
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_printf(m, "   ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}
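/*
 * Walk a list of objects and accumulate totals into the caller's local
 * variables: count/size for all objects, mappable_count/mappable_size for
 * those that are bound map-and-fenceable.
 */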
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while (0)
static int i915_gem_object_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, gtt_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.start);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_gtt_info(struct seq_file *m, void* data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_printf(m, "   ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_printf(m, "No requests\n");

	return 0;
}
static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:   %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity: %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:     %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:   %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity: %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:     %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:        %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:      %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:          %08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for_each_ring(ring, dev_priv, i) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s): %08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}
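/*
 * The helpers below format ring names and per-object flags for the GPU
 * error state dump produced by i915_error_state further down.
 */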
static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}
static void print_error_buffers(struct seq_file *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	seq_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		seq_printf(m, "  %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
			   purgeable_flag(err->purgeable),
			   err->ring != -1 ? " " : "",
			   ring_str(err->ring),
			   cache_level_str(err->cache_level));

		if (err->name)
			seq_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			seq_printf(m, " (fence: %d)", err->fence_reg);

		seq_printf(m, "\n");
		err++;
	}
}
static void i915_ring_error_state(struct seq_file *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  unsigned ring)
{
	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
	seq_printf(m, "%s command stream:\n", ring_str(ring));
	seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
	seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
	seq_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
		seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);

	if (INTEL_INFO(dev)->gen >= 4)
		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
	seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
		seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		seq_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][0],
			   error->semaphore_seqno[ring][0]);
		seq_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][1],
			   error->semaphore_seqno[ring][1]);
	}
	seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
	seq_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
	seq_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	seq_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}
struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};
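/*
 * Dump the most recently captured error state. The reference taken in
 * i915_error_state_open() keeps the record alive while it is being read.
 */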
static int i915_error_state(struct seq_file *m, void *unused)
{
	struct i915_error_state_file_priv *error_priv = m->private;
	struct drm_device *dev = error_priv->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct intel_ring_buffer *ring;
	int i, j, page, offset, elt;

	if (!error) {
		seq_printf(m, "no error state collected\n");
		return 0;
	}

	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	seq_printf(m, "Kernel: " UTS_RELEASE "\n");
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	seq_printf(m, "EIR: 0x%08x\n", error->eir);
	seq_printf(m, "IER: 0x%08x\n", error->ier);
	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	seq_printf(m, "CCID: 0x%08x\n", error->ccid);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		seq_printf(m, "  INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "ERROR: 0x%08x\n", error->error);
		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for_each_ring(ring, dev_priv, i)
		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo,
				    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo,
				    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			seq_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				seq_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x :  %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		obj = error->ring[i].ctx;
		if (obj) {
			seq_printf(m, "%s --- HW Context = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
				seq_printf(m, "[%04x] %08x %08x %08x %08x\n",
					   offset,
					   obj->pages[0][elt],
					   obj->pages[0][elt+1],
					   obj->pages[0][elt+2],
					   obj->pages[0][elt+3]);
				offset += 16;
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

	return 0;
}
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct i915_error_state_file_priv *error_priv = m->private;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_error_state_file_priv *error_priv;
	unsigned long flags;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	return single_open(file, i915_error_state, error_priv);
}
static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct i915_error_state_file_priv *error_priv = m->private;

	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
	kfree(error_priv);

	return single_release(inode, file);
}
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = seq_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};
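/*
 * i915_next_seqno: reading reports the next seqno GEM will assign;
 * writing calls i915_gem_set_seqno() to force a new value.
 */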
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");
static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}
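/*
 * Report current and possible GPU frequencies: the MEMSWCTL/MEMSTAT pair
 * on Ironlake, or the RPS registers (RP_STATE_CAP, RPSTAT1 and the
 * up/down evaluation-interval counters) on gen6/gen7.
 */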
static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER);
	} else {
		seq_printf(m, "no P-state info available\n");
	}

	return 0;
}
static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_printf(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_printf(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_printf(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_printf(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_printf(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_printf(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_printf(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_printf(m, "unknown\n");
		break;
	}

	return 0;
}
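/* gen6+ render C-state (RC6) and RPS control/status dump. */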
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	if (forcewake_count) {
		seq_printf(m, "RC information inaccurate because somebody "
			      "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			 GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_printf(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_printf(m, "Core Power Down\n");
		else
			seq_printf(m, "on\n");
		break;
	case GEN6_RC3:
		seq_printf(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_printf(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_printf(m, "RC7\n");
		break;
	default:
		seq_printf(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));

	return 0;
}
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}
static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_printf(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_printf(m, "FBC enabled\n");
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_NO_OUTPUT:
			seq_printf(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_printf(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_printf(m, "disabled per module param (default off)");
			break;
		default:
			seq_printf(m, "unknown reason");
		}
		seq_printf(m, "\n");
	}
	return 0;
}
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_printf(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}
static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");
	mutex_unlock(&dev->mode_config.mutex);

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_printf(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_printf(m, "\n");
	}

	if (dev_priv->ips.renderctx) {
		seq_printf(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_printf(m, "\n");
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->default_context) {
			seq_printf(m, "HW default context %s ring ", ring->name);
			describe_obj(m, ring->default_context->obj);
			seq_printf(m, "\n");
		}
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}
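/*
 * Bit-6 swizzling describes how the hardware swizzles bit 6 of the
 * address for tiled surfaces depending on the DRAM configuration; the
 * file below reports the detected mode plus the raw registers it was
 * derived from.
 */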
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		seq_printf(m, "ARB_MODE = 0x%08x\n",
			   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
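/*
 * Dump the per-ring PPGTT control registers, plus the aliasing PPGTT's
 * page-directory offset when one has been set up.
 */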
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_printf(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_VALLEYVIEW(dev)) {
		seq_printf(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_DIV_A));
	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_DIV_B));

	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_REFSFR_B));

	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));

	seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
	seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = atomic_read(&dev_priv->gpu_error.reset_counter);

	return 0;
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	DRM_INFO("Manually setting wedged to %llu\n", val);
	i915_handle_error(dev, val);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_ring_stop_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;

	*val = dev_priv->gpu_error.stop_rings;

	return 0;
}

static int
i915_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	dev_priv->gpu_error.stop_rings = val;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
			i915_ring_stop_get, i915_ring_stop_set,
			"0x%08llx\n");
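/*
 * i915_gem_drop_caches: writing a mask of the DROP_* bits below forces
 * the corresponding degree of object eviction, e.g. (path assumes the
 * standard debugfs mount point):
 *
 *	echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */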
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
		  DROP_BOUND | \
		  DROP_RETIRE | \
		  DROP_ACTIVE)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	int ret;

	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (val & DROP_ACTIVE) {
		ret = i915_gpu_idle(dev);
		if (ret)
			goto unlock;
	}

	if (val & (DROP_RETIRE | DROP_ACTIVE))
		i915_gem_retire_requests(dev);

	if (val & DROP_BOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
			if (obj->pin_count == 0) {
				ret = i915_gem_object_unbind(obj);
				if (ret)
					goto unlock;
			}
	}

	if (val & DROP_UNBOUND) {
		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
			if (obj->pages_pin_count == 0) {
				ret = i915_gem_object_put_pages(obj);
				if (ret)
					goto unlock;
			}
	}

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
static int
i915_max_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	*val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_max_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go above the set value.
	 */
	do_div(val, GT_FREQUENCY_MULTIPLIER);
	dev_priv->rps.max_delay = val;
	gen6_set_rps(dev, val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
			i915_max_freq_get, i915_max_freq_set,
			"%llu\n");

static int
i915_min_freq_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	*val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int
i915_min_freq_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	/*
	 * Turbo will still be enabled, but won't go below the set value.
	 */
	do_div(val, GT_FREQUENCY_MULTIPLIER);
	dev_priv->rps.min_delay = val;
	gen6_set_rps(dev, val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
			i915_min_freq_get, i915_min_freq_set,
			"%llu\n");
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 snpcr;
	int ret;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	mutex_unlock(&dev_priv->dev->struct_mutex);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 snpcr;

	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated we need to hook into the minor for release. */
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}
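/*
 * Keeping i915_forcewake_user open holds a forcewake reference, pinning
 * the GT awake so registers can be inspected without the GPU dropping
 * into RC6 mid-read, e.g. from a shell (path assumes the standard
 * debugfs mount):
 *
 *	exec 3</sys/kernel/debug/dri/0/i915_forcewake_user	# grab
 *	...poke registers...
 *	exec 3<&-						# release
 */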
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_get(dev_priv);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	gen6_gt_force_wake_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	return drm_add_fake_info_node(minor, ent, fops);
}
static struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_rstdby_delays", i915_rstdby_delays, 0},
	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
	{"i915_delayfreq_table", i915_delayfreq_table, 0},
	{"i915_inttoext_table", i915_inttoext_table, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_dpio", i915_dpio_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_wedged",
				  &i915_wedged_fops);
	if (ret)
		return ret;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_max_freq",
				  &i915_max_freq_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_min_freq",
				  &i915_min_freq_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_cache_sharing",
				  &i915_cache_sharing_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_ring_stop",
				  &i915_ring_stop_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_gem_drop_caches",
				  &i915_drop_caches_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_error_state",
				  &i915_error_state_fops);
	if (ret)
		return ret;

	ret = i915_debugfs_create(minor->debugfs_root, minor,
				  "i915_next_seqno",
				  &i915_next_seqno_fops);
	if (ret)
		return ret;

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
				 1, minor);
	drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
				 1, minor);
}

#endif /* CONFIG_DEBUG_FS */