/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 * $FreeBSD: src/sys/dev/drm2/i915/i915_drv.h,v 1.1 2012/05/22 11:07:44 kib Exp $

#include <sys/eventhandler.h>
#include <dev/agp/agp_i810.h>
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/workqueue.h>

/* General customization:
#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."
#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

MALLOC_DECLARE(DRM_I915_GEM);

#define pipe_name(p) ((p) + 'A')
#define I915_NUM_PIPE	2
#define transcoder_name(t) ((t) + 'A')
#define plane_name(p) ((p) + 'A')
#define port_name(p) ((p) + 'A')
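/*
 * Editorial example (not from the original source): these helpers map an
 * enum index to the letter used in register names and debug output, e.g.
 * pipe_name(0) evaluates to 'A' and plane_name(1) to 'B'.
 */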
#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))
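/*
 * Usage sketch (illustrative only, not from the original source; the
 * encoder handling is hypothetical):
 *
 *	struct intel_encoder *encoder;
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder) {
 *		// only encoders currently driving 'crtc' are visited
 *	}
 */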
struct intel_pch_pll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
#define I915_NUM_PLLS 2

struct intel_ddi_plls {

/* Interface history:
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_COHERENCY	0
#define WATCH_LISTS	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct drm_i915_private;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	struct opregion_asle __iomem *asle;
	u32 __iomem *lid_state;
#define OPREGION_SIZE	(8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 16
/* 16 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 5
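/*
 * Worked example (editorial note): fence numbers 0..15 need 4 bits, and
 * representing I915_FENCE_REG_NONE (-1) as well requires a signed
 * bitfield, hence the extra sign bit for a total of 5 bits.
 */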
struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	uint32_t setup_seqno;

struct sdvo_device_mapping {

struct drm_i915_error_state {
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 seqno[I915_NUM_RINGS];
	u32 fault_reg[I915_NUM_RINGS];
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
		} *ringbuffer, *batchbuffer;
		struct drm_i915_error_request {
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
	} *active_bo, *pinned_bo;
	u32 active_bo_count, pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;

struct drm_i915_display_funcs {
	void (*dpms)(struct drm_crtc *crtc, int mode);
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
				 uint32_t sprite_width, int pixel_size);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode,
			     struct drm_framebuffer *old_fb);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	void (*init_pch_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj);
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
	/* clock updates for mode set */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

struct drm_i915_gt_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);

#define DEV_INFO_FLAGS \
	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
	DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
	DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
	DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
	DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_llc)

struct intel_device_info {
	u8 cursor_needs_physical:1;
	u8 overlay_needs_physical:1;

#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
	unsigned num_pd_entries;
	vm_paddr_t *pt_dma_addr;
	vm_paddr_t scratch_page_dma_addr;

	FBC_NO_OUTPUT, /* no outputs enabled to compress */
	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
	FBC_MODE_TOO_LARGE, /* mode too large for compression */
	FBC_BAD_PLANE, /* fbc not supported on plane */
	FBC_NOT_TILED, /* buffer not tiled */
	FBC_MULTIPLE_PIPES, /* more than one pipe active */
/* defined in intel_pm.c */
extern struct lock mchdev_lock;

	struct mem_block *next;
	struct mem_block *prev;
	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */

enum intel_sbi_destination {

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)

struct intel_fbc_work;
	struct drm_i915_private *dev_priv;

struct i915_suspend_saved_registers {
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_CONTROL2;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;

struct intel_gen6_power_mgmt {
	struct work_struct work;
	/* lock - irqsave spinlock that protects the work_struct and
	 * pm_iir */
	struct spinlock lock;

	/* The below variables and all the RPS hw state are protected by
	 * dev->struct_mutex. */
	struct delayed_work delayed_resume_work;

	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.

struct intel_ilk_power_mgmt {
	unsigned long last_time1;
	unsigned long chipset_power;
	struct timespec last_time2;
	unsigned long gfx_power;
	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

struct intel_l3_parity {
	struct work_struct error_work;

typedef struct drm_i915_private {
	struct drm_device *dev;

	device_t *gmbus_bridge;
	device_t *bbbus_bridge;
	/** gmbus_lock protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct lock gmbus_lock;

	int relative_constants_mode;

	drm_local_map_t *sarea;
	drm_local_map_t *mmio_map;

	struct drm_i915_gt_funcs gt;
	/** gt_fifo_count and the subsequent register write are synchronized
	 * with dev->struct_mutex. */
	unsigned gt_fifo_count;
	/** forcewake_count is protected by gt_lock */
	unsigned forcewake_count;
	/** gt_lock is also taken in irq contexts. */

	drm_i915_sarea_t *sarea_priv;
	struct intel_ring_buffer ring[I915_NUM_RINGS];

	drm_dma_handle_t *status_page_dmah;
	struct resource *mch_res;
	void *hw_status_page;
	dma_addr_t dma_status_page;
	unsigned int status_gfx_addr;
	drm_local_map_t hws_map;
	struct drm_gem_object *hws_obj;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;

	atomic_t irq_received;

	/** Cached value of IER to avoid reads in updating the bitfield */

	struct lock irq_lock;

	u32 hotplug_supported_mask;

	int tex_lru_log_granularity;
	int allow_batchbuffer;
	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;

	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
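	/*
	 * Worked example (editorial note, assuming hz == 1000): the period
	 * macro evaluates to (1500 * 1000) / 1000 == 1500 ticks, i.e. the
	 * hangcheck timer fires every 1.5 seconds regardless of tick rate.
	 */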
	struct timer_list hangcheck_timer;
	uint32_t last_acthd_bsd;
	uint32_t last_acthd_blt;
	uint32_t last_instdone;
	uint32_t last_instdone1;

	struct intel_opregion opregion;

	struct intel_overlay *overlay;
	bool sprite_scaling_enabled;

	int backlight_level; /* restore backlight to this value */
	bool backlight_enabled;
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits from the VBIOS */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;

	struct edp_power_seq pps;

	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	/** Bridge to intel-gtt-ko */
	const struct intel_gtt *gtt;
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Memory allocator for GTT */
	struct drm_mm gtt_space;
	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head gtt_list;

	/** Usable portion of the GTT for GEM */
	unsigned long gtt_start;
	unsigned long gtt_mappable_end;
	unsigned long gtt_end;

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	 * List of objects currently involved in rendering from the
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 * A reference is held on the buffer while on this list.
	struct list_head active_list;

	 * List of objects which are not in the ringbuffer but which
	 * still have a write_domain which needs to be flushed before
	 * A reference is held on the buffer while on this list.
	struct list_head flushing_list;

	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 * last_rendering_seqno is 0 while an object is in this list.
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	struct list_head inactive_list;

	 * LRU list of objects which are not in the ringbuffer but
	 * are still pinned in the GTT.
	struct list_head pinned_list;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	 * List of objects currently pending being freed.
	 * These objects are no longer in use, but due to a signal
	 * we were prevented from freeing them at the appointed time.
	struct list_head deferred_free_list;

	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	struct delayed_work retire_work;

	 * Are we in a non-interruptible section of code like
	uint32_t next_gem_seqno;

	 * Waiting sequence number, if any
	uint32_t waiting_gem_seqno;

	 * Last seq seen at irq time
	uint32_t irq_gem_seqno;

	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.

	 * Flag if the hardware appears to be wedged.
	 * This is set when attempts to idle the device time out.
	 * It prevents command submission from occurring and makes
	 * every pending request fail
	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	size_t mappable_gtt_total;
	size_t object_memory;

	eventhandler_tag i915_lowmem;

	/* Kernel Modesetting */
	const struct intel_device_info *info;

	struct sdvo_device_mapping sdvo_mappings[2];
	/* indicates whether the LVDS_BORDER should be enabled or not */
	unsigned int lvds_border_bits;
	/* Panel fitter placement and size for Ironlake+ */
	u32 pch_pf_pos, pch_pf_size;

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	/* wait_queue_head_t pending_flip_queue; XXXKIB */
	bool flip_pending_is_done;

	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	struct task idle_task;
	struct callout idle_callout;

	struct child_device_config *child_dev;
	struct drm_connector *int_lvds_connector;
	struct drm_connector *int_edp_connector;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	unsigned long last_time1;
	unsigned long chipset_power;
	struct timespec last_time2;
	unsigned long gfx_power;

	struct lock *mchdev_lock;

	enum no_fbc_reason no_fbc_reason;

	unsigned long cfb_size;

	struct intel_fbc_work *fbc_work;

	struct lock error_lock;
	/* Protected by dev->error_lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct error_work;
	int error_completion;
	struct lock error_completion_lock;
	struct workqueue_struct *wq;
	struct work_struct hotplug_work;

	unsigned long last_gpu_reset;

	struct intel_fbdev *fbdev;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	struct i915_suspend_saved_registers regfile;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
} drm_i915_private_t;

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
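/*
 * Usage sketch (illustrative only, not from the original source): retire
 * requests on every ring that has actually been initialised, skipping
 * e.g. the BSD or BLT ring on hardware without it:
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		i915_gem_retire_requests_ring(ring);
 */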
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */

enum i915_cache_level {
	I915_CACHE_LLC_MLC, /* gen6+ */

enum intel_chip_family {

/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
	struct drm_gem_object base;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;
	struct list_head gtt_list;

	/** This object's place on the active/flushing/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
	/** This object's place on GPU write list */
	struct list_head gpu_write_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	 * This is set if the object is on the active or flushing lists
	 * (has pending rendering), and is not set if it's on inactive (ready
	unsigned int active:1;

	 * This is set if the object has been written to since last bound
	unsigned int dirty:1;

	 * This is set if the object has been written to since the last
	unsigned int pending_gpu_write:1;

	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	 * Advice: are the backing pages purgeable?
	unsigned int madv:2;

	 * Current tiling mode for the object.
	unsigned int tiling_mode:2;
	unsigned int tiling_changed:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	unsigned int map_and_fenceable:1;

	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;

	 * Is the GPU currently using a fence to access this buffer,
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:2;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;

	 * Used for performing relocations during execbuffer insertion.
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	 * Current offset of the object in GTT space.
	 * This is the same as gtt_space->start
	uint32_t gtt_offset;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_rendering_seqno;
	struct intel_ring_buffer *ring;

	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;
	struct intel_ring_buffer *last_fenced_ring;

	/** Current tiling stride for the object, if it's tiled. */

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
	 * flags which individual pages are valid.
	uint8_t *page_cpu_valid;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;

	 * Number of crtcs where this object is currently the fb, but
	 * will be page flipped away on the next vblank. When it
	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
	atomic_t pending_flip;

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
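/*
 * Usage sketch (illustrative only): converting the embedded GEM base
 * object back to the driver-private wrapper, e.g. inside a GEM callback
 * that receives a struct drm_gem_object *gem_obj:
 *
 *	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 */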
/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	/** Position in the ringbuffer of the end of the request */
	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;

struct drm_i915_file_private {
	struct spinlock lock;
	struct list_head request_list;

#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)

#define IS_I830(dev)		((dev)->pci_device == 0x3577)
#define IS_845G(dev)		((dev)->pci_device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
				 (dev)->pci_device == 0x0152 || \
				 (dev)->pci_device == 0x015a)
#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
				 (dev)->pci_device == 0x0106 || \
				 (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_ULT(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pci_device & 0xFF00) == 0x0A00)

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chipsets).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)

#define HAS_BSD(dev)	(INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev)	(INTEL_INFO(dev)->has_blt_ring)
#define HAS_LLC(dev)	(INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6)

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)	(IS_I830(dev) || IS_845G(dev))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \

#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev)		(IS_G4X(dev) || IS_IRONLAKE(dev))

#define HAS_FW_BLC(dev)		(INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev)	(INTEL_INFO(dev)->has_fbc)

#define HAS_PIPE_CONTROL(dev)	(INTEL_INFO(dev)->gen >= 5)

#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE	0x9c00

#define INTEL_PCH_TYPE(dev)	(((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev)	(INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_SPLIT(dev)	(IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))

#define HAS_FORCE_WAKE(dev)	(INTEL_INFO(dev)->has_force_wake)

#define HAS_L3_GPU_CACHE(dev)	(IS_IVYBRIDGE(dev) || IS_HASWELL(dev))

#define GT_FREQUENCY_MULTIPLIER 50
/*
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available on Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6 and in the
 * voltage consumed by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE	(1<<0)
#define INTEL_RC6p_ENABLE	(1<<1)
#define INTEL_RC6pp_ENABLE	(1<<2)
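/*
 * Illustrative example (editorial note): a platform that tolerates the
 * deeper states could allow plain RC6 together with deep RC6 by passing
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) as the mode mask.
 */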
extern int intel_iommu_enabled;
extern struct drm_ioctl_desc i915_ioctls[];
extern struct drm_driver i915_driver_info;
extern struct cdev_pager_ops i915_gem_pager_ops;
extern int i915_panel_ignore_lid;
extern unsigned int i915_powersave;
extern int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern int i915_panel_use_ssc;
extern int i915_vbt_sdvo_panel_type;
extern int i915_enable_rc6;
extern int i915_enable_fbc;
extern int i915_enable_ppgtt;
extern int i915_enable_hangcheck;

void i915_sysctl_cleanup(struct drm_device *dev);

void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
		struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
		struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_emit_box(struct drm_device *dev,
		struct drm_clip_rect __user *boxes,
		int i, int DR1, int DR4);
int i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
void i915_update_gfx_val(struct drm_i915_private *dev_priv);
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);

extern int i915_irq_emit(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
		struct drm_file *file_priv);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);

extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
void intel_enable_asle(struct drm_device *dev);
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);

void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
#define i915_destroy_error_state(x)

int i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
		uint32_t *handle_p);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void i915_gem_unload(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
		bool map_and_fenceable);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);

i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;

i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
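/*
 * Usage sketch (illustrative only): fence pinning must be balanced around
 * any window where the fence register backing 'obj' must not be stolen:
 *
 *	i915_gem_object_pin_fence(obj);
 *	// ... access that relies on the fence staying programmed ...
 *	i915_gem_object_unpin_fence(obj);
 */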
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		unsigned long mappable_end, unsigned long end);
uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
		uint32_t size, int tiling_mode);
int i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
		struct intel_ring_buffer *pipelined);
int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int i915_gem_flush_ring(struct intel_ring_buffer *ring,
		uint32_t invalidate_domains, uint32_t flush_domains);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
int i915_gem_idle(struct drm_device *dev);
int i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
		struct intel_ring_buffer *ring, uint32_t seqno);
int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
		struct drm_i915_gem_request *request);
int i915_wait_seqno(struct intel_ring_buffer *ring,
int i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
		struct intel_ring_buffer *pipelined);
void i915_gem_reset(struct drm_device *dev);
int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
		enum i915_cache_level cache_level);

void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_detach_phys_object(struct drm_device *dev,
		struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
		struct drm_i915_gem_object *obj, int id, int align);

int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,

/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_evict.c */
int i915_gem_evict_something(struct drm_device *dev, int min_size,
		unsigned alignment, bool mappable);
int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern void intel_gmbus_set_speed(device_t idev, int speed);
extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
extern void intel_iic_reset(struct drm_device *dev);

/* intel_opregion.c */
int intel_opregion_setup(struct drm_device *dev);
extern int intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void opregion_asle_intr(struct drm_device *dev);
extern void intel_opregion_gse_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);

/* i915_gem_gtt.c */
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
		struct drm_i915_gem_object *obj, enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
		struct drm_i915_gem_object *obj);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
		enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);

extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);

extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
		struct drm_device *dev);
extern void intel_overlay_print_error_state(struct sbuf *m,
		struct intel_overlay_error_state *error);
extern struct intel_display_error_state *intel_display_capture_error_state(
		struct drm_device *dev);
extern void intel_display_print_error_state(struct sbuf *m,
		struct drm_device *dev, struct intel_display_error_state *error);

trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz)

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))
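/*
 * Usage sketch (illustrative only; MI_FLUSH and MI_NOOP come from
 * i915_reg.h, and intel_ring_begin() returns 0 on success): emitting a
 * two-dword command sequence on the legacy render ring:
 *
 *	if (BEGIN_LP_RING(2) == 0) {
 *		OUT_RING(MI_FLUSH);
 *		OUT_RING(MI_NOOP);
 *		ADVANCE_LP_RING();
 *	}
 */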
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\

#define READ_HWSP(dev_priv, reg) (((volatile u32 *)(dev_priv->hw_status_page))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX	0x20
#define I915_BREADCRUMB_INDEX	0x21
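/*
 * Usage sketch (illustrative only): the breadcrumb is the last sequence
 * number the GPU wrote into the hardware status page, so software can
 * poll it to see how far the ring has progressed:
 *
 *	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
 *		// the request identified by irq_nr has completed
 */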
const struct intel_device_info *i915_get_device_id(int device);

int i915_reset(struct drm_device *dev, u8 flags);

int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
		struct sysctl_oid *top);

i915_seqno_passed(uint32_t seq1, uint32_t seq2)
	return ((int32_t)(seq1 - seq2) >= 0);
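/*
 * Worked example (editorial note): the signed subtraction makes the
 * comparison safe across 32-bit seqno wraparound, e.g. seq1 == 0x00000001
 * and seq2 == 0xfffffffe gives (int32_t)0x3 >= 0, so seq1 is correctly
 * treated as having passed seq2 even though it is numerically smaller.
 */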
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
/* On the SNB platform, before reading ring registers the forcewake bit must
 * be set to prevent the GT core from powering down and stale values from
 * being returned.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	(((dev_priv)->info->gen >= 6) && \
	 ((reg) < 0x40000) && \
	 ((reg) != FORCEWAKE))

#define __i915_read(x, y) \
	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

#define __i915_write(x, y) \
	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);

__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)

#define I915_READ8(reg)		i915_read8(dev_priv, (reg))
#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val))

#define I915_READ16(reg)	i915_read16(dev_priv, (reg))
#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val))
#define I915_READ16_NOTRACE(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16_NOTRACE(reg, val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))

#define I915_READ(reg)		i915_read32(dev_priv, (reg))
#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val))
#define I915_READ_NOTRACE(reg)		DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE_NOTRACE(reg, val)	DRM_WRITE32(dev_priv->mmio_map, (reg), (val))

#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val))
#define I915_READ64(reg)	i915_read64(dev_priv, (reg))

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)