/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: src/sys/dev/drm2/i915/i915_drv.h,v 1.1 2012/05/22 11:07:44 kib Exp $
 */
#include <sys/eventhandler.h>

#include <dev/agp/agp_i810.h>

#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/workqueue.h>

/* General customization: */

#define DRIVER_AUTHOR	"Tungsten Graphics, Inc."

#define DRIVER_NAME	"i915"
#define DRIVER_DESC	"Intel Graphics"
#define DRIVER_DATE	"20080730"

MALLOC_DECLARE(DRM_I915_GEM);

#define pipe_name(p)	((p) + 'A')
#define I915_NUM_PIPE	2

#define transcoder_name(t)	((t) + 'A')

#define plane_name(p)	((p) + 'A')

#define port_name(p)	((p) + 'A')

#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

#define for_each_pipe(p)	for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))
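/*
 * Usage sketch (illustrative only, not part of the driver): how the two
 * iteration macros above are typically used.  "dev_priv", "dev" and "crtc"
 * are assumed to be in scope, as they are in the .c files that use these
 * macros; struct intel_encoder is declared in intel_drv.h.
 */
#if 0
	int pipe;
	struct intel_encoder *encoder;

	for_each_pipe(pipe)
		DRM_DEBUG("pipe %c\n", pipe_name(pipe));

	for_each_encoder_on_crtc(dev, crtc, encoder)
		DRM_DEBUG("encoder %p is on this crtc\n", encoder);
#endif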
struct intel_pch_pll {
	int refcount;	/* count of number of CRTCs sharing this PLL */
	int active;	/* count of number of active CRTCs (i.e. DPMS on) */
	bool on;	/* is the PLL actually active? Disabled during modeset */
};
#define I915_NUM_PLLS 2

struct intel_ddi_plls {

/* Interface history:
 *
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_COHERENCY	0
#define WATCH_LISTS	0

#define I915_GEM_PHYS_CURSOR_0		1
#define I915_GEM_PHYS_CURSOR_1		2
#define I915_GEM_PHYS_OVERLAY_REGS	3
#define I915_MAX_PHYS_OBJECT		(I915_GEM_PHYS_OVERLAY_REGS)
struct drm_i915_gem_phys_object {
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct drm_i915_private;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	struct opregion_asle __iomem *asle;
	u32 __iomem *lid_state;
};
#define OPREGION_SIZE	(8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};

#define I915_FENCE_REG_NONE	-1
#define I915_MAX_NUM_FENCES	16
/* 16 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS	5

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	uint32_t setup_seqno;
	int pin_count;
};

struct sdvo_device_mapping {

struct drm_i915_error_state {
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 seqno[I915_NUM_RINGS];
	u32 fault_reg[I915_NUM_RINGS];
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
		} *ringbuffer, *batchbuffer;
		struct drm_i915_error_request {
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
	} *active_bo, *pinned_bo;
	u32 active_bo_count, pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
};
struct drm_i915_display_funcs {
	void (*dpms)(struct drm_crtc *crtc, int mode);
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
	    uint32_t sprite_width, int pixel_size);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
	    struct drm_display_mode *mode,
	    struct drm_display_mode *adjusted_mode,
	    int x, int y,
	    struct drm_framebuffer *old_fb);
	void (*write_eld)(struct drm_connector *connector,
	    struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	void (*init_pch_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
	    struct drm_framebuffer *fb,
	    struct drm_i915_gem_object *obj);
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
	    int x, int y);
	/* clock updates for mode set */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};

struct drm_i915_gt_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
#define DEV_INFO_FLAGS \
	DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
	DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
	DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
	DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
	DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
	DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
	DEV_INFO_FLAG(has_llc)
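/*
 * Usage sketch (illustrative): DEV_INFO_FLAGS is an X-macro.  A caller
 * defines DEV_INFO_FLAG and DEV_INFO_SEP before expanding it to stamp out
 * one statement per flag; "info" is an assumed pointer to the
 * struct intel_device_info declared below.
 */
#if 0
#define DEV_INFO_FLAG(name) printf(#name ": %d\n", (int)info->name);
#define DEV_INFO_SEP
	DEV_INFO_FLAGS
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
#endif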
struct intel_device_info {
	u8 cursor_needs_physical:1;
	u8 overlay_needs_physical:1;
};

#define I915_PPGTT_PD_ENTRIES	512
#define I915_PPGTT_PT_ENTRIES	1024
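/*
 * Worked numbers (assuming the usual 4 KiB page size): 512 page-directory
 * entries times 1024 page-table entries times 4096 bytes per page covers
 * 512 * 1024 * 4096 = 2^31 bytes, i.e. a 2 GiB per-process address space
 * for the aliasing PPGTT below.
 */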
struct i915_hw_ppgtt {
	unsigned num_pd_entries;
	vm_paddr_t *pt_dma_addr;
	vm_paddr_t scratch_page_dma_addr;
};

enum no_fbc_reason {
	FBC_NO_OUTPUT,		/* no outputs enabled to compress */
	FBC_STOLEN_TOO_SMALL,	/* not enough space to hold compressed buffers */
	FBC_UNSUPPORTED_MODE,	/* interlace or doublescanned mode */
	FBC_MODE_TOO_LARGE,	/* mode too large for compression */
	FBC_BAD_PLANE,		/* fbc not supported on plane */
	FBC_NOT_TILED,		/* buffer not tiled */
	FBC_MULTIPLE_PIPES,	/* more than one pipe active */
};

/* defined in intel_pm.c */
extern struct lock mchdev_lock;

struct mem_block {
	struct mem_block *next;
	struct mem_block *prev;
	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
};

enum intel_sbi_destination {

#define QUIRK_PIPEA_FORCE	(1<<0)
#define QUIRK_LVDS_SSC_DISABLE	(1<<1)
#define QUIRK_INVERT_BRIGHTNESS	(1<<2)

struct intel_fbc_work;

	struct drm_i915_private *dev_priv;
struct i915_suspend_saved_registers {
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_CONTROL2;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};
struct intel_gen6_power_mgmt {
	struct work_struct work;

	/* lock - irqsave spinlock that protects the work_struct and
	 * pm_iir. */
	struct spinlock lock;

	/* The below variables and all the RPS hw state are protected by
	 * dev->struct_mutex. */

	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
};

struct intel_ilk_power_mgmt {
	unsigned long last_time1;
	unsigned long chipset_power;
	struct timespec last_time2;
	unsigned long gfx_power;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;
};

struct intel_l3_parity {
	struct work_struct error_work;
};
typedef struct drm_i915_private {
	struct drm_device *dev;

	device_t *gmbus_bridge;
	device_t *bbbus_bridge;

	/** gmbus_lock protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct lock gmbus_lock;

	int relative_constants_mode;

	drm_local_map_t *sarea;
	drm_local_map_t *mmio_map;

	struct drm_i915_gt_funcs gt;
	/** gt_fifo_count and the subsequent register write are synchronized
	 * with dev->struct_mutex. */
	unsigned gt_fifo_count;
	/** forcewake_count is protected by gt_lock */
	unsigned forcewake_count;
	/** gt_lock is also taken in irq contexts. */

	drm_i915_sarea_t *sarea_priv;
	struct intel_ring_buffer ring[I915_NUM_RINGS];

	drm_dma_handle_t *status_page_dmah;
	struct resource *mch_res;

	void *hw_status_page;
	dma_addr_t dma_status_page;

	unsigned int status_gfx_addr;
	drm_local_map_t hws_map;
	struct drm_gem_object *hws_obj;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;

	atomic_t irq_received;

	/** Cached value of IER to avoid reads in updating the bitfield */

	struct lock irq_lock;

	u32 hotplug_supported_mask;

	int tex_lru_log_granularity;
	int allow_batchbuffer;
	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;

	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD ((1500 /* in ms */ * hz) / 1000)
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;

	uint32_t last_acthd_bsd;
	uint32_t last_acthd_blt;
	uint32_t last_instdone;
	uint32_t last_instdone1;

	struct intel_opregion opregion;

	struct intel_overlay *overlay;
	bool sprite_scaling_enabled;

	int backlight_level; /* restore backlight to this value */
	bool backlight_enabled;
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits from the VBIOS */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;

	struct edp_power_seq pps;

	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	/** Bridge to intel-gtt-ko */
	const struct intel_gtt *gtt;
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Memory allocator for GTT */
	struct drm_mm gtt_space;
	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head gtt_list;

	/** Usable portion of the GTT for GEM */
	unsigned long gtt_start;
	unsigned long gtt_mappable_end;
	unsigned long gtt_end;

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;
	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of objects which are not in the ringbuffer but which
	 * still have a write_domain which needs to be flushed before
	 * unbinding.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head flushing_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/**
	 * LRU list of objects which are not in the ringbuffer but
	 * are still pinned in the GTT.
	 */
	struct list_head pinned_list;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * List of objects currently pending being freed.
	 *
	 * These objects are no longer in use, but due to a signal
	 * we were prevented from freeing them at the appointed time.
	 */
	struct list_head deferred_free_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */

	uint32_t next_gem_seqno;

	/**
	 * Waiting sequence number, if any
	 */
	uint32_t waiting_gem_seqno;

	/**
	 * Last seq seen at irq time
	 */
	uint32_t irq_gem_seqno;

	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT.  It needs to be
	 * replaced with a semaphore.  It also needs to be
	 * transitioned away from for kernel modesetting.
	 */

	/**
	 * Flag if the hardware appears to be wedged.
	 *
	 * This is set when attempts to idle the device timeout.
	 * It prevents command submission from occurring and makes
	 * every pending request fail
	 */
	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	size_t mappable_gtt_total;
	size_t object_memory;

	eventhandler_tag i915_lowmem;

	/* Kernel Modesetting */

	const struct intel_device_info *info;

	struct sdvo_device_mapping sdvo_mappings[2];
	/* indicate whether the LVDS_BORDER should be enabled or not */
	unsigned int lvds_border_bits;
	/* Panel fitter placement and size for Ironlake+ */
	u32 pch_pf_pos, pch_pf_size;

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	/* wait_queue_head_t pending_flip_queue; XXXKIB */
	bool flip_pending_is_done;

	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */

	struct task idle_task;
	struct callout idle_callout;

	struct child_device_config *child_dev;
	struct drm_connector *int_lvds_connector;
	struct drm_connector *int_edp_connector;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	unsigned long last_time1;
	unsigned long chipset_power;
	struct timespec last_time2;
	unsigned long gfx_power;

	struct lock *mchdev_lock;

	enum no_fbc_reason no_fbc_reason;

	unsigned long cfb_size;

	struct intel_fbc_work *fbc_work;

	struct lock error_lock;
	/* Protected by dev->error_lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct error_work;
	int error_completion;
	struct lock error_completion_lock;
	struct workqueue_struct *wq;
	struct work_struct hotplug_work;

	unsigned long last_gpu_reset;

	struct intel_fbdev *fbdev;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	struct i915_suspend_saved_registers regfile;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here. */
	struct i915_dri1_state dri1;
} drm_i915_private_t;
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
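/*
 * Usage sketch (illustrative): walk only the rings that were actually
 * initialised on this device; "dev_priv" is an assumed drm_i915_private
 * pointer and the retire helper is declared further below.
 */
#if 0
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		i915_gem_retire_requests_ring(ring);
#endif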
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

enum i915_cache_level {
	I915_CACHE_LLC_MLC, /* gen6+ */

enum intel_chip_family {

/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
	struct drm_gem_object base;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;
	struct list_head gtt_list;
	/** This object's place on the active/flushing/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
	/** This object's place on GPU write list */
	struct list_head gpu_write_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * This is set if the object is on the active or flushing lists
	 * (has pending rendering), and is not set if it's on inactive (ready
	 * to be unbound).
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT.
	 */
	unsigned int dirty:1;

	/**
	 * This is set if the object has been written to since the last
	 * GPU flush.
	 */
	unsigned int pending_gpu_write:1;

	/**
	 * Fence register bits (if any) for this object.  Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	unsigned int tiling_changed:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;

	/*
	 * Is the GPU currently using a fence to access this buffer,
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:2;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * Current offset of the object in GTT space.
	 *
	 * This is the same as gtt_space->start
	 */
	uint32_t gtt_offset;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_rendering_seqno;
	struct intel_ring_buffer *ring;

	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;
	struct intel_ring_buffer *last_fenced_ring;

	/** Current tiling stride for the object, if it's tiled. */

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/**
	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
	 * flags which individual pages are valid.
	 */
	uint8_t *page_cpu_valid;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;

	/**
	 * Number of crtcs where this object is currently the fb, but
	 * will be page flipped away on the next vblank.  When it
	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
	 */
	atomic_t pending_flip;
};

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
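/*
 * Usage sketch (illustrative): to_intel_bo() recovers the i915 object from
 * the embedded base GEM object, e.g. in a callback that only receives the
 * generic struct drm_gem_object; "example_busy" is a hypothetical helper.
 */
#if 0
static bool
example_busy(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	return (obj->active != 0);
}
#endif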
/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

struct drm_i915_file_private {
	struct {
		struct spinlock lock;
		struct list_head request_list;
	} mm;
};
#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)

#define IS_I830(dev)		((dev)->pci_device == 0x3577)
#define IS_845G(dev)		((dev)->pci_device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
				 (dev)->pci_device == 0x0152 || \
				 (dev)->pci_device == 0x015a)
#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
				 (dev)->pci_device == 0x0106 || \
				 (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_ULT(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pci_device & 0xFF00) == 0x0A00)

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)

#define HAS_BSD(dev)		(INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->has_blt_ring)
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev)	(!IS_GEN2(dev) && !(IS_I915G(dev) || \
						IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev)		(IS_G4X(dev) || IS_IRONLAKE(dev))

#define HAS_FW_BLC(dev)		(INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev)	(INTEL_INFO(dev)->has_fbc)

#define HAS_PIPE_CONTROL(dev)	(INTEL_INFO(dev)->gen >= 5)

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00

#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev)	(INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_SPLIT(dev)	(IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))

#define HAS_FORCE_WAKE(dev)	(INTEL_INFO(dev)->has_force_wake)

#define HAS_L3_GPU_CACHE(dev)	(IS_IVYBRIDGE(dev) || IS_HASWELL(dev))

#define GT_FREQUENCY_MULTIPLIER 50

/*
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage.  This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up again.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in the
 * latency required to enter and leave RC6, and in the voltage consumed by
 * the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6.  Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform.  RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more power,
 * but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE	(1<<0)
#define INTEL_RC6p_ENABLE	(1<<1)
#define INTEL_RC6pp_ENABLE	(1<<2)
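/*
 * Sketch (illustrative, an assumption about the tunable's encoding): the
 * i915_enable_rc6 knob declared below is treated as a bitmask of the flags
 * above, so allowing plain RC6 together with deep RC6 corresponds to a
 * mask like this.
 */
#if 0
	int rc6_mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;	/* == 3 */
#endif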
extern int intel_iommu_enabled;
extern struct drm_ioctl_desc i915_ioctls[];
extern struct drm_driver i915_driver_info;
extern struct cdev_pager_ops i915_gem_pager_ops;
extern int i915_panel_ignore_lid;
extern unsigned int i915_powersave;
extern int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern int i915_panel_use_ssc;
extern int i915_vbt_sdvo_panel_type;
extern int i915_enable_rc6;
extern int i915_enable_fbc;
extern int i915_enable_ppgtt;
extern int i915_enable_hangcheck;

void i915_sysctl_cleanup(struct drm_device *dev);

void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
    struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
    struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
    unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
    struct drm_clip_rect __user *boxes,
    int i, int DR1, int DR4);
int i915_emit_box_p(struct drm_device *dev, struct drm_clip_rect *box,
    int DR1, int DR4);

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
void i915_update_gfx_val(struct drm_i915_private *dev_priv);
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);

extern int i915_irq_emit(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
    struct drm_file *file_priv);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);

extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
void intel_enable_asle(struct drm_device *dev);
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);

void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void i915_destroy_error_state(struct drm_device *dev);

int i915_gem_create(struct drm_file *file, struct drm_device *dev, uint64_t size,
    uint32_t *handle_p);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void i915_gem_unload(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
    bool map_and_fenceable);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
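/*
 * The two inline helpers below take and release a pin on the fence register
 * backing an object, so that a fence in active use (by the GPU or a
 * pageflip) is not stolen for another object; they are no-ops for objects
 * without a fence.
 */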
static inline void
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
	}
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
    size_t size);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
    unsigned long mappable_end, unsigned long end);
uint32_t i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
    uint32_t size, int tiling_mode);
int i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
    bool write);
int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
    struct intel_ring_buffer *pipelined);
int i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int i915_gem_flush_ring(struct intel_ring_buffer *ring,
    uint32_t invalidate_domains, uint32_t flush_domains);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
int i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
int i915_gem_idle(struct drm_device *dev);
int i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gpu_idle(struct drm_device *dev, bool do_retire);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
    struct intel_ring_buffer *ring, uint32_t seqno);
int i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
    struct drm_i915_gem_request *request);
int i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
    struct intel_ring_buffer *pipelined);
void i915_gem_reset(struct drm_device *dev);
int i915_wait_request(struct intel_ring_buffer *ring, uint32_t seqno,
    bool do_retire);
int i915_gem_mmap(struct drm_device *dev, uint64_t offset, int prot);
int i915_gem_fault(struct drm_device *dev, uint64_t offset, int prot,
    vm_page_t *mres);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
    enum i915_cache_level cache_level);

void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_detach_phys_object(struct drm_device *dev,
    struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
    struct drm_i915_gem_object *obj, int id, int align);

int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
    struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
    uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
    uint32_t handle);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_evict.c */
int i915_gem_evict_something(struct drm_device *dev, int min_size,
    unsigned alignment, bool mappable);
int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern void intel_gmbus_set_speed(device_t idev, int speed);
extern void intel_gmbus_force_bit(device_t idev, bool force_bit);
extern void intel_iic_reset(struct drm_device *dev);

/* intel_opregion.c */
int intel_opregion_setup(struct drm_device *dev);
extern int intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void opregion_asle_intr(struct drm_device *dev);
extern void intel_opregion_gse_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);

/* i915_gem_gtt.c */
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
    struct drm_i915_gem_object *obj, enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
    struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
    enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);

extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);

extern struct intel_overlay_error_state *intel_overlay_capture_error_state(
    struct drm_device *dev);
extern void intel_overlay_print_error_state(struct sbuf *m,
    struct intel_overlay_error_state *error);
extern struct intel_display_error_state *intel_display_capture_error_state(
    struct drm_device *dev);
extern void intel_display_print_error_state(struct sbuf *m,
    struct drm_device *dev, struct intel_display_error_state *error);
static inline void
trace_i915_reg_rw(boolean_t rw, int reg, uint64_t val, int sz)
{
}

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	intel_ring_advance(LP_RING(dev_priv))
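/*
 * Usage sketch (illustrative): the classic dri1-era pattern for emitting
 * commands on the render ring; MI_NOOP is assumed to come from the register
 * definitions header, and "dev_priv" must be in scope for these macros.
 */
#if 0
	if (BEGIN_LP_RING(2) == 0) {
		OUT_RING(MI_NOOP);
		OUT_RING(MI_NOOP);
		ADVANCE_LP_RING();
	}
#endif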
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {		\
	if (LP_RING(dev->dev_private)->obj == NULL)		\
		LOCK_TEST_WITH_RETURN(dev, file);		\
} while (0)

#define READ_HWSP(dev_priv, reg) (((volatile u32 *)(dev_priv->hw_status_page))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX	0x20
#define I915_BREADCRUMB_INDEX	0x21

const struct intel_device_info *i915_get_device_id(int device);

int i915_reset(struct drm_device *dev, u8 flags);

int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top);

static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return ((int32_t)(seq1 - seq2) >= 0);
}
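/*
 * Worked example: the signed subtraction above makes the comparison safe
 * across 32-bit wrap-around.  With seq1 == 0x00000001 and seq2 ==
 * 0xffffffff, seq1 - seq2 == 2, so (int32_t)2 >= 0 and seq1 is correctly
 * treated as "after" seq2 even though it is numerically smaller.
 */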
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);

/* On the SNB platform, before reading ring registers the forcewake bit must
 * be set to prevent the GT core from powering down and stale values from
 * being read.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
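/*
 * Usage sketch (illustrative): bracket raw GT register reads with the
 * get/put pair so the GT core stays awake; GEN6_REG is a hypothetical
 * register offset below 0x40000.
 */
#if 0
	u32 val;

	gen6_gt_force_wake_get(dev_priv);
	val = I915_READ(GEN6_REG);
	gen6_gt_force_wake_put(dev_priv);
#endif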
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	(((dev_priv)->info->gen >= 6) && \
	 ((reg) < 0x40000) && \
	 ((reg) != FORCEWAKE))

#define __i915_read(x, y) \
	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

#define __i915_write(x, y) \
	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);

__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
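/*
 * Expansion sketch: __i915_write(32, 32) above declares
 *
 *	void i915_write32(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 *
 * which is what the I915_WRITE() convenience macro below resolves to.
 */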
#define I915_READ8(reg)		i915_read8(dev_priv, (reg))
#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val))

#define I915_READ16(reg)	i915_read16(dev_priv, (reg))
#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val))
#define I915_READ16_NOTRACE(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16_NOTRACE(reg, val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))

#define I915_READ(reg)		i915_read32(dev_priv, (reg))
#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val))
#define I915_READ_NOTRACE(reg)		DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE_NOTRACE(reg, val)	DRM_WRITE32(dev_priv->mmio_map, (reg), (val))

#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val))
#define I915_READ64(reg)	i915_read64(dev_priv, (reg))

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
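/*
 * Usage sketch (illustrative): a posting read forces a preceding write out
 * to the hardware without recording a trace event; SOME_REG is
 * hypothetical.
 */
#if 0
	I915_WRITE(SOME_REG, val);
	POSTING_READ(SOME_REG);
#endif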