/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"

#include <linux/highmem.h>

typedef uint32_t gen6_gtt_pte_t;

#define GEN6_GTT_ADDR_ENCODE(addr)      ((addr) | (((addr) >> 28) & 0xff0))

#define GEN6_PDE_VALID                  (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)      GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID                  (1 << 0)
#define GEN6_PTE_UNCACHED               (1 << 1)
#define HSW_PTE_UNCACHED                (0)
#define GEN6_PTE_CACHE_LLC              (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC          (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)      GEN6_GTT_ADDR_ENCODE(addr)
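
/*
 * Illustrative example (not part of the original source): for a 40-bit
 * physical address such as 0x12_3456_7000, GEN6_GTT_ADDR_ENCODE() keeps
 * address bits 31:12 in place and folds bits 39:32 (0x12) into PTE bits
 * 11:4, giving 0x34567120 before gen6_pte_encode() ORs in the valid and
 * cache bits.
 */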

static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
                                             dma_addr_t addr,
                                             enum i915_cache_level level)
{
        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_LLC_MLC:
                /* Haswell doesn't set L3 this way */
                if (IS_HASWELL(dev))
                        pte |= GEN6_PTE_CACHE_LLC;
                else
                        pte |= GEN6_PTE_CACHE_LLC_MLC;
                break;
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                if (IS_HASWELL(dev))
                        pte |= HSW_PTE_UNCACHED;
                else
                        pte |= GEN6_PTE_UNCACHED;
                break;
        default:
                BUG();
        }

        return pte;
}

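/*
 * Write the PPGTT page-directory entries into the reserved tail of the
 * global GTT and enable PPGTT mode on every ring.  Gen6 additionally needs
 * the ECO/GAB workaround bits; on gen7+ GFX_MODE is programmed per ring.
 */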
static int gen6_ppgtt_enable(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t pd_offset;
        struct intel_ring_buffer *ring;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        uint32_t pd_entry, first_pd_entry_in_global_pt;
        int i;

        first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                vm_paddr_t pt_addr;

                pt_addr = VM_PAGE_TO_PHYS(ppgtt->pt_pages[i]);
                pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
                pd_entry |= GEN6_PDE_VALID;

                intel_gtt_write(first_pd_entry_in_global_pt + i, pd_entry);
        }
        intel_gtt_read_pte(first_pd_entry_in_global_pt);   /* posting read */

        pd_offset = ppgtt->pd_offset;
        pd_offset /= 64; /* in cachelines, */
        pd_offset <<= 16; /* ... placed in bits 31:16 of PP_DIR_BASE */

        if (INTEL_INFO(dev)->gen == 6) {
                uint32_t ecochk, gab_ctl, ecobits;

                ecobits = I915_READ(GAC_ECO_BITS);
                I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
                                         ECOBITS_PPGTT_CACHE64B);

                gab_ctl = I915_READ(GAB_CTL);
                I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

                ecochk = I915_READ(GAM_ECOCHK);
                I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
                                       ECOCHK_PPGTT_CACHE64B);
                I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
        } else if (INTEL_INFO(dev)->gen >= 7) {
                uint32_t ecochk, ecobits;

                ecobits = I915_READ(GAC_ECO_BITS);
                I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

                ecochk = I915_READ(GAM_ECOCHK);
                if (IS_HASWELL(dev)) {
                        ecochk |= ECOCHK_PPGTT_WB_HSW;
                } else {
                        ecochk |= ECOCHK_PPGTT_LLC_IVB;
                        ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
                }
                I915_WRITE(GAM_ECOCHK, ecochk);
                /* GFX_MODE is per-ring on gen7+ */
        }

        for_each_ring(ring, dev_priv, i) {
                if (INTEL_INFO(dev)->gen >= 7)
                        I915_WRITE(RING_MODE_GEN7(ring),
                                   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

                I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
                I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
        }

        return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
                                   unsigned first_entry,
                                   unsigned num_entries)
{
        gen6_gtt_pte_t *pt_vaddr, scratch_pte;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;

        scratch_pte = gen6_pte_encode(ppgtt->dev,
                                      ppgtt->scratch_page_dma_addr,
                                      I915_CACHE_LLC);

        while (num_entries) {
                last_pte = first_pte + num_entries;
                if (last_pte > I915_PPGTT_PT_ENTRIES)
                        last_pte = I915_PPGTT_PT_ENTRIES;

                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

                for (i = first_pte; i < last_pte; i++)
                        pt_vaddr[i] = scratch_pte;

                kunmap_atomic(pt_vaddr);

                num_entries -= last_pte - first_pte;
                first_pte = 0;
                act_pt++;
        }
}

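/*
 * Walk the object's scatter/gather list and write one PTE per 4K page,
 * crossing page-table boundaries as needed.
 */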
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
                                      struct sg_table *pages,
                                      unsigned first_entry,
                                      enum i915_cache_level cache_level)
{
        gen6_gtt_pte_t *pt_vaddr;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned i, j, m, segment_len;
        dma_addr_t page_addr;
        struct scatterlist *sg;

        /* init sg walking */
        sg = pages->sgl;
        i = 0;
        segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
        m = 0;

        while (i < pages->nents) {
                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

                for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
                        page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
                                                      cache_level);

                        /* grab the next page */
                        if (++m == segment_len) {
                                if (++i == pages->nents)
                                        break;

                                sg = sg_next(sg);
                                segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
                                m = 0;
                        }
                }

                kunmap_atomic(pt_vaddr);

                first_pte = 0;
                act_pt++;
        }
}

static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
        int i;

        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
                        pci_unmap_page(ppgtt->dev->pdev,
                                       ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }

        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++)
                __free_page(ppgtt->pt_pages[i]);
        kfree(ppgtt->pt_pages);
        kfree(ppgtt);
}

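/*
 * Allocate the page-directory bookkeeping and one page per page table,
 * then point the ppgtt vtable at the gen6 implementations above.  The PDEs
 * themselves live in the global GTT, stolen from its tail.
 */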
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
        struct drm_device *dev = ppgtt->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned first_pd_entry_in_global_pt;
        int i;
        int ret = -ENOMEM;

        /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
         * entries. For aliasing ppgtt support we just steal them at the end for
         * now. */
        first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);

        ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
        ppgtt->enable = gen6_ppgtt_enable;
        ppgtt->clear_range = gen6_ppgtt_clear_range;
        ppgtt->insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->cleanup = gen6_ppgtt_cleanup;
        ppgtt->pt_pages = kzalloc(sizeof(struct vm_page *)*ppgtt->num_pd_entries,
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
                return -ENOMEM;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = vm_page_alloc(NULL, 0,
                    VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
                if (!ppgtt->pt_pages[i])
                        goto err_pt_alloc;
        }

        ppgtt->clear_range(ppgtt, 0,
                           ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

        ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

        return 0;

err_pt_alloc:
        /* Error path (reconstructed): reuse the aliasing-ppgtt teardown to
         * release whatever page tables were allocated before the failure. */
        dev_priv->mm.aliasing_ppgtt = ppgtt;
        i915_gem_cleanup_aliasing_ppgtt(dev);

        return ret;
}

static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt;
        int ret;

        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
                return -ENOMEM;

        ppgtt->dev = dev;
        ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

        if (INTEL_INFO(dev)->gen < 8)
                ret = gen6_ppgtt_init(ppgtt);
        else
                BUG();

        if (ret)
                kfree(ppgtt);
        else
                dev_priv->mm.aliasing_ppgtt = ppgtt;

        return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

        if (!ppgtt)
                return;

        ppgtt->cleanup(ppgtt);
        dev_priv->mm.aliasing_ppgtt = NULL;
}

#if 0   /* duplicate definition; compiled out so it does not clash with the version above */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        vm_page_t m;
        int i;

        if (!ppgtt)
                return;
        dev_priv->mm.aliasing_ppgtt = NULL;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                m = ppgtt->pt_pages[i];
                if (m) {
                        vm_page_busy_wait(m, FALSE, "i915gem");
                        vm_page_unwire(m, 0);
                        vm_page_free(m);
                }
        }
        drm_free(ppgtt->pt_pages, M_DRM);
        drm_free(ppgtt, M_DRM);
}
#endif

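/*
 * Insertion path that takes a plain vm_page_t array rather than a
 * scatter/gather table; used by i915_ppgtt_bind_object() below.
 */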
static void
i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
    unsigned num_entries, vm_page_t *pages, enum i915_cache_level cache_level)
{
        gen6_gtt_pte_t *pt_vaddr;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
        dma_addr_t page_addr;

        while (num_entries) {
                last_pte = first_pte + num_entries;
                if (last_pte > I915_PPGTT_PT_ENTRIES)
                        last_pte = I915_PPGTT_PT_ENTRIES;

                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

                for (i = first_pte; i < last_pte; i++) {
                        page_addr = VM_PAGE_TO_PHYS(*pages);
                        pt_vaddr[i] = gen6_pte_encode(ppgtt->dev, page_addr,
                                                      cache_level);

                        pages++;
                }

                kunmap_atomic(pt_vaddr);

                num_entries -= last_pte - first_pte;
                first_pte = 0;
                act_pd++;
        }
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
{
        i915_ppgtt_insert_pages(ppgtt, obj->gtt_space->start >> PAGE_SHIFT,
            obj->base.size >> PAGE_SHIFT, obj->pages, cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
{
        ppgtt->clear_range(ppgtt,
                           obj->gtt_space->start >> PAGE_SHIFT,
                           obj->base.size >> PAGE_SHIFT);
}

extern int intel_iommu_gfx_mapped;

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
        if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
                return true;
#endif

        return false;
}

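/*
 * do_idling() and undo_idling() bracket GTT unmaps on platforms that need
 * the VT-d idle workaround: the GPU is idled (non-interruptibly) before
 * the unmap, and the previous interruptible state is restored afterwards.
 */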
static bool do_idling(struct drm_i915_private *dev_priv)
{
        bool ret = dev_priv->mm.interruptible;

        if (unlikely(dev_priv->gtt.do_idle_maps)) {
                dev_priv->mm.interruptible = false;
                if (i915_gpu_idle(dev_priv->dev)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
        }

        return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
        if (unlikely(dev_priv->gtt.do_idle_maps))
                dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

        /* First fill our portion of the GTT with scratch pages */
        dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
                                      dev_priv->gtt.total / PAGE_SIZE);

        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
                i915_gem_clflush_object(obj);
                i915_gem_gtt_bind_object(obj, obj->cache_level);
        }

        i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
        if (obj->has_dma_mapping)
                return 0;

        if (!dma_map_sg(&obj->base.dev->pdev->dev,
                        obj->pages->sgl, obj->pages->nents,
                        PCI_DMA_BIDIRECTIONAL))
                return -ENOSPC;

        return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The
 * object will be accessible to the GPU via commands whose operands reference
 * offsets within the global GTT as well as accessible by the GPU through the
 * GMADR mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct drm_device *dev,
                                     struct sg_table *st,
                                     unsigned int first_entry,
                                     enum i915_cache_level level)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct scatterlist *sg = st->sgl;
        gen6_gtt_pte_t __iomem *gtt_entries =
                (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int unused, i = 0;
        unsigned int len, m = 0;
        dma_addr_t addr;

        for_each_sg(st->sgl, sg, st->nents, unused) {
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                for (m = 0; m < len; m++) {
                        addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        iowrite32(gen6_pte_encode(dev, addr, level),
                                  &gtt_entries[i]);
                        i++;
                }
        }

        /* XXX: This serves as a posting read to make sure that the PTE has
         * actually been updated. There is some concern that even though
         * registers and PTEs are within the same BAR that they are potentially
         * subject to NUMA access patterns. Therefore, even with the way we
         * assume hardware should work, we must keep this posting read for
         * paranoia.
         */
        if (i != 0)
                WARN_ON(readl(&gtt_entries[i-1])
                        != gen6_pte_encode(dev, addr, level));

        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
         * have finished.
         */
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

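/*
 * Point every PTE in [first_entry, first_entry + num_entries) at the
 * scratch page, clamping the count to the size of the global GTT.
 */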
static void gen6_ggtt_clear_range(struct drm_device *dev,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
                (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
        int i;

        if (WARN(num_entries > max_entries,
                 "First entry = %d; Num entries = %d (max=%d)\n",
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;

        scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
                                      I915_CACHE_LLC);
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
}

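/*
 * Legacy paths that route GGTT updates through the old intel_gtt/AGP layer
 * instead of writing PTEs directly.
 */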
static void i915_ggtt_insert_entries(struct drm_device *dev,
                                     struct sg_table *st,
                                     unsigned int pg_start,
                                     enum i915_cache_level cache_level)
{
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

        intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct drm_device *dev,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
{
        intel_gtt_clear_range(first_entry, num_entries);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level)
{
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
            AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

        intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
            obj->base.size >> PAGE_SHIFT, obj->pages, flags);

        obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->gtt.gtt_clear_range(obj->base.dev,
                                      obj->gtt_space->start >> PAGE_SHIFT,
                                      obj->base.size >> PAGE_SHIFT);

        obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible;

        interruptible = do_idling(dev_priv);

        if (!obj->has_dma_mapping)
                dma_unmap_sg(&dev->pdev->dev,
                             obj->pages->sgl, obj->pages->nents,
                             PCI_DMA_BIDIRECTIONAL);

        undo_idling(dev_priv, interruptible);
}

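/*
 * drm_mm color callback: leave one page of separation between neighbouring
 * GTT nodes whose cache "color" (cache level) differs.
 */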
static void i915_gtt_color_adjust(struct drm_mm_node *node,
                                  unsigned long color,
                                  unsigned long *start,
                                  unsigned long *end)
{
        if (node->color != color)
                *start += 4096;

        if (!list_empty(&node->node_list)) {
                node = list_entry(node->node_list.next,
                                  struct drm_mm_node,
                                  node_list);
                if (node->allocated && node->color != color)
                        *end -= 4096;
        }
}

void i915_gem_setup_global_gtt(struct drm_device *dev,
                               unsigned long start,
                               unsigned long mappable_end,
                               unsigned long end)
{
        /* Let GEM Manage all of the aperture.
         *
         * However, leave one page at the end still bound to the scratch page.
         * There are a number of places where the hardware apparently prefetches
         * past the end of the object, and we've seen multiple hangs with the
         * GPU head pointer stuck in a batchbuffer bound at the last page of the
         * aperture.  One page should be enough to keep any prefetching inside
         * of the aperture.
         */
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long mappable;
        int error;

        BUG_ON(mappable_end > end);

        mappable = min(end, mappable_end) - start;

        /* Subtract the guard page ... */
        drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
        if (!HAS_LLC(dev))
                dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

        dev_priv->gtt.start = start;
        dev_priv->gtt.mappable_end = mappable_end;
        dev_priv->gtt.total = end - start;

        /* ... but ensure that we clear the entire range. */
        intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);

        device_printf(dev->dev,
            "taking over the fictitious range 0x%lx-0x%lx\n",
            dev->agp->base + start, dev->agp->base + start + mappable);
        error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
            dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
        if (i915_enable_ppgtt >= 0)
                return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
        /* Disable ppgtt on SNB if VT-d is on. */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long gtt_size, mappable_size;

        gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
        mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

        if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
                int ret;

                if (INTEL_INFO(dev)->gen <= 7) {
                        /* PPGTT pdes are stolen from global gtt ptes, so shrink the
                         * aperture accordingly when using aliasing ppgtt. */
                        gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
                }

                i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

                ret = i915_gem_init_aliasing_ppgtt(dev);
                if (!ret)
                        return;

                DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
                drm_mm_takedown(&dev_priv->mm.gtt_space);
                gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
        }

        i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

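/*
 * Top-level GTT init: either take over the legacy intel_gtt/AGP state or,
 * for gen6+, allocate our own bookkeeping and install the gen6 GGTT
 * insert/clear callbacks.  Note that the "|| 1" below currently forces the
 * legacy path on every platform.
 */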
int i915_gem_gtt_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* On modern platforms we need not worry ourselves with the legacy
         * hostbridge query stuff. Skip it entirely
         */
        if (INTEL_INFO(dev)->gen < 6 || 1) {
                dev_priv->mm.gtt = intel_gtt_get();
                if (!dev_priv->mm.gtt) {
                        DRM_ERROR("Failed to initialize GTT\n");
                        return -ENODEV;
                }

                dev_priv->gtt.do_idle_maps = needs_idle_maps(dev);

                dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
                dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;

                return 0;
        }

        dev_priv->mm.gtt = kmalloc(sizeof(*dev_priv->mm.gtt), M_DRM, M_WAITOK | M_ZERO);
        if (!dev_priv->mm.gtt)
                return -ENOMEM;

        /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
        DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
        DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
        DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);

        dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
        dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;