/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/highmem.h>
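
/*
 * PPGTT/GTT PTE layout (gen6+): each entry is 32 bits. Physical address
 * bits 31:12 are stored in PTE bits 31:12 directly; physical address
 * bits 39:32 are folded into PTE bits 11:4, which is what the
 * shift-and-mask in the encode macro below does.
 */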
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
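
/*
 * Build a SNB/IVB PTE: valid bit, encoded physical address, and the
 * cacheability field in bits 2:1 (uncached, LLC, or LLC+MLC).
 */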
static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
				      dma_addr_t addr,
				      enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_LLC_MLC:
		pte |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	return pte;
}
#define BYT_PTE_WRITEABLE		(1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
				     dma_addr_t addr,
				     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable.  Other platforms don't have a
	 * setting for read-only/writable, so this matches that behavior.
	 */
	pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}
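
/*
 * Haswell has no dedicated uncached PTE encoding (HSW_PTE_UNCACHED is 0);
 * any cacheable level simply selects the LLC encoding.
 */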
static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
				     dma_addr_t addr,
				     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= GEN6_PTE_CACHE_LLC;

	return pte;
}
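
/*
 * Program the aliasing PPGTT: write the page-directory entries into the
 * stolen tail of the global GTT, then point every ring's PP_DIR_BASE at
 * the directory and set GFX_PPGTT_ENABLE (per-ring on gen7+).
 */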
static int gen6_ppgtt_enable(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	uint32_t pd_entry, first_pd_entry_in_global_pt;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f);

	first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		vm_paddr_t pt_addr;

		pt_addr = VM_PAGE_TO_PHYS(ppgtt->pt_pages[i]);
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		intel_gtt_write(first_pd_entry_in_global_pt + i, pd_entry);
	}
	intel_gtt_read_pte(first_pd_entry_in_global_pt);	/* posting read */

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		uint32_t ecochk, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		ecochk = I915_READ(GAM_ECOCHK);
		if (IS_HASWELL(dev)) {
			ecochk |= ECOCHK_PPGTT_WB_HSW;
		} else {
			ecochk |= ECOCHK_PPGTT_LLC_IVB;
			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
		}
		I915_WRITE(GAM_ECOCHK, ecochk);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}

	return 0;
}
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = ppgtt->pte_encode(ppgtt->dev,
					ppgtt->scratch_page_dma_addr,
					I915_CACHE_LLC);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}
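
/*
 * Write one PTE per backing page. Each page table holds
 * I915_PPGTT_PT_ENTRIES (1024) entries, so the walk kmaps a table,
 * fills it, and moves to the next while stepping page-by-page through
 * the scatterlist segments.
 */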
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned i, j, m, segment_len;
	dma_addr_t page_addr;
	struct scatterlist *sg;

	/* init sg walking */
	sg = pages->sgl;
	i = 0;
	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
	m = 0;

	while (i < pages->nents) {
		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			pt_vaddr[j] = ppgtt->pte_encode(ppgtt->dev, page_addr,
							cache_level);

			/* grab the next page */
			if (++m == segment_len) {
				if (++i == pages->nents)
					break;

				sg = sg_next(sg);
				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
				m = 0;
			}
		}

		kunmap_atomic(pt_vaddr);

		first_pte = 0;
		act_pt++;
	}
}
static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}
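
/*
 * Allocate the page-directory bookkeeping and one page per page table:
 * 512 tables x 1024 entries x 4KiB pages covers the 2GiB PPGTT address
 * space advertised via PP_DIR_DCLV_2G above.
 */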
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);

	if (IS_HASWELL(dev)) {
		ppgtt->pte_encode = hsw_pte_encode;
	} else if (IS_VALLEYVIEW(dev)) {
		ppgtt->pte_encode = byt_pte_encode;
	} else {
		ppgtt->pte_encode = gen6_pte_encode;
	}
	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->clear_range = gen6_ppgtt_clear_range;
	ppgtt->insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->cleanup = gen6_ppgtt_cleanup;
	ppgtt->pt_pages = kzalloc(sizeof(struct vm_page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = vm_page_alloc(NULL, 0,
						   VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
		if (!ppgtt->pt_pages[i])
			goto fail;
	}

	ppgtt->clear_range(ppgtt, 0,
			   ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

fail:
	dev_priv->mm.aliasing_ppgtt = ppgtt;
	i915_gem_cleanup_aliasing_ppgtt(dev);
	return -ENOMEM;
}
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->dev = dev;
	ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else
		BUG();

	if (ret)
		kfree(ppgtt);
	else
		dev_priv->mm.aliasing_ppgtt = ppgtt;

	return ret;
}
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->cleanup(ppgtt);
	dev_priv->mm.aliasing_ppgtt = NULL;
}
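
/*
 * BSD-side variant of insert_entries: takes a flat vm_page_t array
 * instead of a scatterlist and encodes the pages' physical addresses.
 */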
static void
i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
    unsigned num_entries, vm_page_t *pages, enum i915_cache_level cache_level)
{
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;
	dma_addr_t page_addr;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++) {
			page_addr = VM_PAGE_TO_PHYS(*pages);
			pt_vaddr[i] = ppgtt->pte_encode(ppgtt->dev, page_addr,
							cache_level);
			pages++;
		}

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	i915_ppgtt_insert_pages(ppgtt, obj->gtt_space->start >> PAGE_SHIFT,
	    obj->base.size >> PAGE_SHIFT, obj->pages, cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->clear_range(ppgtt,
			   obj->gtt_space->start >> PAGE_SHIFT,
			   obj->base.size >> PAGE_SHIFT);
}
extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}
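
/*
 * GTT contents are lost across suspend/resume: scrub our range back to
 * scratch pages, then rewrite the mapping for every still-bound object.
 */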
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
				      dev_priv->gtt.total / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}
/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
			  &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1])
			!= dev_priv->gtt.pte_encode(dev, addr, level));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
static void gen6_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = dev_priv->gtt.pte_encode(dev,
					       dev_priv->gtt.scratch_page_dma,
					       I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}
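
/*
 * Pre-gen6 (legacy AGP) paths: PTE writes go through the intel_gtt
 * layer, which only distinguishes cached from uncached mappings.
 */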
static void i915_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	intel_gtt_clear_range(first_entry, num_entries);
}
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
			       obj->base.size >> PAGE_SHIFT, obj->pages, flags);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_clear_range(obj->base.dev,
				      obj->gtt_space->start >> PAGE_SHIFT,
				      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}
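
/*
 * On non-LLC platforms, objects with different cache levels must not
 * share a GTT page. The allocator's color_adjust hook shrinks a
 * candidate hole by one 4KiB page at each end that abuts a
 * differently-colored neighbour, leaving a guard page between them.
 */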
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long mappable;
	int error;

	BUG_ON(mappable_end > end);

	mappable = min(end, mappable_end) - start;

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	dev_priv->gtt.start = start;
	dev_priv->gtt.mappable_end = mappable_end;
	dev_priv->gtt.total = end - start;

	/* ... but ensure that we clear the entire range. */
	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);

	device_printf(dev->dev,
	    "taking over the fictitious range 0x%lx-0x%lx\n",
	    dev->agp->base + start, dev->agp->base + start + mappable);
	error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
	    dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
}
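
/*
 * PPGTT is enabled by default unless overridden by the i915_enable_ppgtt
 * tunable; the VT-d erratum below forces it off on Sandybridge.
 */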
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
			 * aperture accordingly when using aliasing ppgtt. */
			gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
		}

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->mm.gtt_space);
		gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
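
/*
 * Probe the gen6+ GMCH: size the aperture from BAR2, read SNB_GMCH_CTRL
 * to derive the GTT and stolen-memory sizes, and map the GTT page table
 * (the upper half of BAR0) write-combined.
 */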
static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

	if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
		*stolen = gen7_get_stolen_size(snb_gmch_ctl);
	else
		*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret)
		DRM_ERROR("Scratch setup failed\n");

	dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;

	return ret;
}
static void gen6_gmch_remove(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	iounmap(dev_priv->gtt.gsm);
	teardown_scratch_page(dev_priv->dev);
}
static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	return 0;
}

static void i915_gmch_remove(struct drm_device *dev)
{
}
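
/*
 * Top-level GTT setup: pick the legacy (intel_gtt/AGP) or gen6+ probe
 * path and install the matching PTE encoder and clear/insert hooks.
 */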
int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* XXX Legacy agp stuff */
	dev_priv->mm.gtt = intel_gtt_get();
	if (!dev_priv->mm.gtt) {
		DRM_ERROR("Failed to initialize GTT\n");
		return -ENODEV;
	}

	if (INTEL_INFO(dev)->gen <= 5 || 1) {	/* XXX legacy path forced for now */
		dev_priv->gtt.gtt_probe = i915_gmch_probe;
		dev_priv->gtt.gtt_remove = i915_gmch_remove;

		dev_priv->gtt.do_idle_maps = needs_idle_maps(dev);

		dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
		dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
	} else {
		dev_priv->gtt.gtt_probe = gen6_gmch_probe;
		dev_priv->gtt.gtt_remove = gen6_gmch_remove;

		if (IS_HASWELL(dev)) {
			dev_priv->gtt.pte_encode = hsw_pte_encode;
		} else if (IS_VALLEYVIEW(dev)) {
			dev_priv->gtt.pte_encode = byt_pte_encode;
		} else {
			dev_priv->gtt.pte_encode = gen6_pte_encode;
		}

		/* The gen6+ probe path keeps its own gtt bookkeeping struct
		 * rather than borrowing the legacy intel_gtt one. */
		dev_priv->mm.gtt = kmalloc(sizeof(*dev_priv->mm.gtt), M_DRM,
					   M_WAITOK | M_ZERO);
		if (!dev_priv->mm.gtt)
			return -ENOMEM;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 dev_priv->gtt.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
			 dev_priv->gtt.mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
			 dev_priv->gtt.stolen_size >> 20);

	return 0;
}