2 * Copyright © 2010 Daniel Vetter
3 * Copyright © 2011-2014 Intel Corporation
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 #include <linux/seq_file.h>
28 #include <drm/i915_drm.h>
30 #include "i915_vgpu.h"
31 #include "i915_trace.h"
32 #include "intel_drv.h"
34 #include <linux/bitmap.h>
36 #include <sys/mplock2.h>
39 * DOC: Global GTT views
41 * Background and previous state
43 * Historically objects could exist (be bound) in global GTT space only as
44 * singular instances with a view representing all of the object's backing pages
45 * in a linear fashion. This view will be called a normal view.
47 * To support multiple views of the same object, where the number of mapped
48 * pages is not equal to the backing store, or where the layout of the pages
49 * is not linear, the concept of a GGTT view was added.
51 * One example of an alternative view is a stereo display driven by a single
52 * image. In this case we would have a framebuffer looking like this
58 * The above would represent a normal GGTT view as normally mapped for GPU or CPU
59 * rendering. In contrast, fed to the display engine would be an alternative
60 * view which could look something like this:
65 * In this example both the size and layout of pages in the alternative view are
66 * different from the normal view.
68 * Implementation and usage
70 * GGTT views are implemented using VMAs and are distinguished via enum
71 * i915_ggtt_view_type and struct i915_ggtt_view.
73 * A new flavour of core GEM functions which work with GGTT bound objects was
74 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
75 * renaming in large amounts of code. They take the struct i915_ggtt_view
76 * parameter encapsulating all metadata required to implement a view.
78 * As a helper for callers which are only interested in the normal view,
79 * a globally const i915_ggtt_view_normal singleton instance exists. All old
80 * core GEM API functions, the ones not taking the view parameter, operate on,
81 * or with, the normal GGTT view.
83 * Code wanting to add or use a new GGTT view needs to:
85 * 1. Add a new enum with a suitable name.
86 * 2. Extend the metadata in the i915_ggtt_view structure if required.
87 * 3. Add support to i915_get_vma_pages().
89 * New views are required to build a scatter-gather table from within the
90 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
91 * exists for the lifetime of a VMA.
93 * The core API is designed to have copy semantics, which means that the
94 * passed-in struct i915_ggtt_view does not need to be persistent (left around after
95 * calling the core API functions).
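/*
 * Illustrative sketch only (not driver code): because of the copy
 * semantics described above, a caller may build a view on the stack and
 * let it go out of scope right after the call. 'obj' and the pin helper
 * below are hypothetical stand-ins for a bound GEM object and one of the
 * real _view-suffixed GEM functions.
 */
#if 0
	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_ROTATED };
	int ret;

	ret = ggtt_pin_with_view(obj, &view);	/* hypothetical helper */
	/* 'view' need not outlive the call; the VMA keeps its own copy. */
#endif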
100 i915_get_ggtt_vma_pages(struct i915_vma *vma);
102 const struct i915_ggtt_view i915_ggtt_view_normal;
103 const struct i915_ggtt_view i915_ggtt_view_rotated = {
104 .type = I915_GGTT_VIEW_ROTATED
107 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
109 bool has_aliasing_ppgtt;
111 bool has_full_48bit_ppgtt;
113 has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
114 has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
115 has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
117 if (intel_vgpu_active(dev))
118 has_full_ppgtt = false; /* emulation is too hard */
121 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
122 * execlists, the sole mechanism available to submit work.
124 if (INTEL_INFO(dev)->gen < 9 &&
125 (enable_ppgtt == 0 || !has_aliasing_ppgtt))
128 if (enable_ppgtt == 1)
131 if (enable_ppgtt == 2 && has_full_ppgtt)
134 if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
137 #ifdef CONFIG_INTEL_IOMMU
138 /* Disable ppgtt on SNB if VT-d is on. */
139 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
140 DRM_INFO("Disabling PPGTT because VT-d is on\n");
145 /* Early VLV doesn't have this */
146 if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
147 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
151 if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
152 return has_full_48bit_ppgtt ? 3 : 2;
154 return has_aliasing_ppgtt ? 1 : 0;
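/*
 * Minimal sketch of how the sanitized value is consumed (the assignment
 * below is an assumption about the module parameter plumbing, not a
 * quote of it): 0 disables PPGTT, 1 selects aliasing PPGTT, 2 full 32b
 * PPGTT and 3 full 48b PPGTT.
 */
#if 0
	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
#endif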
157 static int ppgtt_bind_vma(struct i915_vma *vma,
158 enum i915_cache_level cache_level,
163 /* Currently applicable only to VLV */
165 pte_flags |= PTE_READ_ONLY;
167 vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
168 cache_level, pte_flags);
173 static void ppgtt_unbind_vma(struct i915_vma *vma)
175 vma->vm->clear_range(vma->vm,
181 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
182 enum i915_cache_level level,
185 gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
189 case I915_CACHE_NONE:
190 pte |= PPAT_UNCACHED_INDEX;
193 pte |= PPAT_DISPLAY_ELLC_INDEX;
196 pte |= PPAT_CACHED_INDEX;
203 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
204 const enum i915_cache_level level)
206 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
208 if (level != I915_CACHE_NONE)
209 pde |= PPAT_CACHED_PDE_INDEX;
211 pde |= PPAT_UNCACHED_INDEX;
215 #define gen8_pdpe_encode gen8_pde_encode
216 #define gen8_pml4e_encode gen8_pde_encode
218 static gen6_pte_t snb_pte_encode(dma_addr_t addr,
219 enum i915_cache_level level,
220 bool valid, u32 unused)
222 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
223 pte |= GEN6_PTE_ADDR_ENCODE(addr);
226 case I915_CACHE_L3_LLC:
228 pte |= GEN6_PTE_CACHE_LLC;
230 case I915_CACHE_NONE:
231 pte |= GEN6_PTE_UNCACHED;
240 static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
241 enum i915_cache_level level,
242 bool valid, u32 unused)
244 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
245 pte |= GEN6_PTE_ADDR_ENCODE(addr);
248 case I915_CACHE_L3_LLC:
249 pte |= GEN7_PTE_CACHE_L3_LLC;
252 pte |= GEN6_PTE_CACHE_LLC;
254 case I915_CACHE_NONE:
255 pte |= GEN6_PTE_UNCACHED;
264 static gen6_pte_t byt_pte_encode(dma_addr_t addr,
265 enum i915_cache_level level,
266 bool valid, u32 flags)
268 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
269 pte |= GEN6_PTE_ADDR_ENCODE(addr);
271 if (!(flags & PTE_READ_ONLY))
272 pte |= BYT_PTE_WRITEABLE;
274 if (level != I915_CACHE_NONE)
275 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
280 static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
281 enum i915_cache_level level,
282 bool valid, u32 unused)
284 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
285 pte |= HSW_PTE_ADDR_ENCODE(addr);
287 if (level != I915_CACHE_NONE)
288 pte |= HSW_WB_LLC_AGE3;
293 static gen6_pte_t iris_pte_encode(dma_addr_t addr,
294 enum i915_cache_level level,
295 bool valid, u32 unused)
297 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
298 pte |= HSW_PTE_ADDR_ENCODE(addr);
301 case I915_CACHE_NONE:
304 pte |= HSW_WT_ELLC_LLC_AGE3;
307 pte |= HSW_WB_ELLC_LLC_AGE3;
314 static int __setup_page_dma(struct drm_device *dev,
315 struct i915_page_dma *p, gfp_t flags)
317 struct device *device = &dev->pdev->dev;
319 p->page = alloc_page(flags);
323 p->daddr = dma_map_page(device,
324 p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
326 if (dma_mapping_error(device, p->daddr)) {
327 __free_page(p->page);
334 static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
336 return __setup_page_dma(dev, p, GFP_KERNEL);
339 static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
341 if (WARN_ON(!p->page))
344 dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
345 __free_page(p->page);
346 memset(p, 0, sizeof(*p));
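/*
 * Illustrative lifecycle of an i915_page_dma (sketch only): allocate and
 * DMA-map one backing page, use the CPU page and bus address, then unmap
 * and free it again.
 */
#if 0
	struct i915_page_dma p;

	if (setup_page_dma(dev, &p) == 0) {
		/* p.page is the CPU page, p.daddr the 4KiB DMA mapping */
		cleanup_page_dma(dev, &p);
	}
#endif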
349 static void *kmap_page_dma(struct i915_page_dma *p)
351 return kmap_atomic(p->page);
354 /* We use the flushing unmap only with ppgtt structures:
355 * page directories, page tables and scratch pages.
357 static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
359 /* There are only a few exceptions for gen >= 6: chv and bxt.
360 * And we are not sure about the latter so play safe for now.
362 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
363 drm_clflush_virt_range(vaddr, PAGE_SIZE);
365 kunmap_atomic(vaddr);
368 #define kmap_px(px) kmap_page_dma(px_base(px))
369 #define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
371 #define setup_px(dev, px) setup_page_dma((dev), px_base(px))
372 #define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
373 #define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
374 #define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
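/* Note: the px_*() accessors let these macros take any paging structure
 * (page table, page directory, pdp or pml4) and resolve it to the
 * embedded i915_page_dma, so a single set of map/fill/cleanup primitives
 * serves every level of the hierarchy.
 */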
376 static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
380 uint64_t * const vaddr = kmap_page_dma(p);
382 for (i = 0; i < 512; i++)
385 kunmap_page_dma(dev, vaddr);
388 static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
389 const uint32_t val32)
395 fill_page_dma(dev, p, v);
398 static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
400 struct i915_page_scratch *sp;
403 sp = kzalloc(sizeof(*sp), GFP_KERNEL);
405 return ERR_PTR(-ENOMEM);
407 ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
413 set_pages_uc(px_page(sp), 1);
418 static void free_scratch_page(struct drm_device *dev,
419 struct i915_page_scratch *sp)
421 set_pages_wb(px_page(sp), 1);
427 static struct i915_page_table *alloc_pt(struct drm_device *dev)
429 struct i915_page_table *pt;
430 const size_t count = INTEL_INFO(dev)->gen >= 8 ?
431 GEN8_PTES : GEN6_PTES;
434 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
436 return ERR_PTR(-ENOMEM);
438 pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
444 ret = setup_px(dev, pt);
451 kfree(pt->used_ptes);
458 static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
461 kfree(pt->used_ptes);
465 static void gen8_initialize_pt(struct i915_address_space *vm,
466 struct i915_page_table *pt)
468 gen8_pte_t scratch_pte;
470 scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
471 I915_CACHE_LLC, true);
473 fill_px(vm->dev, pt, scratch_pte);
476 static void gen6_initialize_pt(struct i915_address_space *vm,
477 struct i915_page_table *pt)
479 gen6_pte_t scratch_pte;
481 WARN_ON(px_dma(vm->scratch_page) == 0);
483 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
484 I915_CACHE_LLC, true, 0);
486 fill32_px(vm->dev, pt, scratch_pte);
489 static struct i915_page_directory *alloc_pd(struct drm_device *dev)
491 struct i915_page_directory *pd;
494 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
496 return ERR_PTR(-ENOMEM);
498 pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
499 sizeof(*pd->used_pdes), GFP_KERNEL);
503 ret = setup_px(dev, pd);
510 kfree(pd->used_pdes);
517 static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
521 kfree(pd->used_pdes);
526 static void gen8_initialize_pd(struct i915_address_space *vm,
527 struct i915_page_directory *pd)
529 gen8_pde_t scratch_pde;
531 scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
533 fill_px(vm->dev, pd, scratch_pde);
536 static int __pdp_init(struct drm_device *dev,
537 struct i915_page_directory_pointer *pdp)
539 size_t pdpes = I915_PDPES_PER_PDP(dev);
541 pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
542 sizeof(unsigned long),
544 if (!pdp->used_pdpes)
547 pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
549 if (!pdp->page_directory) {
550 kfree(pdp->used_pdpes);
551 /* The PDP might be the statically allocated top level. Keep it
552 * as clean as possible */
553 pdp->used_pdpes = NULL;
560 static void __pdp_fini(struct i915_page_directory_pointer *pdp)
562 kfree(pdp->used_pdpes);
563 kfree(pdp->page_directory);
564 pdp->page_directory = NULL;
568 i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
570 struct i915_page_directory_pointer *pdp;
573 WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
575 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
577 return ERR_PTR(-ENOMEM);
579 ret = __pdp_init(dev, pdp);
583 ret = setup_px(dev, pdp);
597 static void free_pdp(struct drm_device *dev,
598 struct i915_page_directory_pointer *pdp)
601 if (USES_FULL_48BIT_PPGTT(dev)) {
602 cleanup_px(dev, pdp);
607 static void gen8_initialize_pdp(struct i915_address_space *vm,
608 struct i915_page_directory_pointer *pdp)
610 gen8_ppgtt_pdpe_t scratch_pdpe;
612 scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
614 fill_px(vm->dev, pdp, scratch_pdpe);
617 static void gen8_initialize_pml4(struct i915_address_space *vm,
618 struct i915_pml4 *pml4)
620 gen8_ppgtt_pml4e_t scratch_pml4e;
622 scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
625 fill_px(vm->dev, pml4, scratch_pml4e);
629 gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
630 struct i915_page_directory_pointer *pdp,
631 struct i915_page_directory *pd,
634 gen8_ppgtt_pdpe_t *page_directorypo;
636 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
639 page_directorypo = kmap_px(pdp);
640 page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
641 kunmap_px(ppgtt, page_directorypo);
645 gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
646 struct i915_pml4 *pml4,
647 struct i915_page_directory_pointer *pdp,
650 gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
652 WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
653 pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
654 kunmap_px(ppgtt, pagemap);
657 /* Broadwell Page Directory Pointer Descriptors */
658 static int gen8_write_pdp(struct drm_i915_gem_request *req,
662 struct intel_engine_cs *ring = req->ring;
667 ret = intel_ring_begin(req, 6);
671 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
672 intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
673 intel_ring_emit(ring, upper_32_bits(addr));
674 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
675 intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
676 intel_ring_emit(ring, lower_32_bits(addr));
677 intel_ring_advance(ring);
682 static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
683 struct drm_i915_gem_request *req)
687 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
688 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
690 ret = gen8_write_pdp(req, i, pd_daddr);
698 static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
699 struct drm_i915_gem_request *req)
701 return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
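/* Note: with 4 level page tables a single descriptor suffices, so
 * gen8_48b_mm_switch() above loads only PDP0 with the PML4 address and
 * leaves the remaining PDP registers untouched.
 */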
704 static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
705 struct i915_page_directory_pointer *pdp,
708 gen8_pte_t scratch_pte)
710 struct i915_hw_ppgtt *ppgtt =
711 container_of(vm, struct i915_hw_ppgtt, base);
712 gen8_pte_t *pt_vaddr;
713 unsigned pdpe = gen8_pdpe_index(start);
714 unsigned pde = gen8_pde_index(start);
715 unsigned pte = gen8_pte_index(start);
716 unsigned num_entries = length >> PAGE_SHIFT;
717 unsigned last_pte, i;
722 while (num_entries) {
723 struct i915_page_directory *pd;
724 struct i915_page_table *pt;
726 if (WARN_ON(!pdp->page_directory[pdpe]))
729 pd = pdp->page_directory[pdpe];
731 if (WARN_ON(!pd->page_table[pde]))
734 pt = pd->page_table[pde];
736 if (WARN_ON(!px_page(pt)))
739 last_pte = pte + num_entries;
740 if (last_pte > GEN8_PTES)
741 last_pte = GEN8_PTES;
743 pt_vaddr = kmap_px(pt);
745 for (i = pte; i < last_pte; i++) {
746 pt_vaddr[i] = scratch_pte;
750 kunmap_px(ppgtt, pt);
753 if (++pde == I915_PDES) {
754 if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
761 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
766 struct i915_hw_ppgtt *ppgtt =
767 container_of(vm, struct i915_hw_ppgtt, base);
768 gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
769 I915_CACHE_LLC, use_scratch);
771 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
772 gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
776 struct i915_page_directory_pointer *pdp;
778 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
779 gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
786 gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
787 struct i915_page_directory_pointer *pdp,
788 struct sg_page_iter *sg_iter,
790 enum i915_cache_level cache_level)
792 struct i915_hw_ppgtt *ppgtt =
793 container_of(vm, struct i915_hw_ppgtt, base);
794 gen8_pte_t *pt_vaddr;
795 unsigned pdpe = gen8_pdpe_index(start);
796 unsigned pde = gen8_pde_index(start);
797 unsigned pte = gen8_pte_index(start);
801 while (__sg_page_iter_next(sg_iter)) {
802 if (pt_vaddr == NULL) {
803 struct i915_page_directory *pd = pdp->page_directory[pdpe];
804 struct i915_page_table *pt = pd->page_table[pde];
805 pt_vaddr = kmap_px(pt);
809 gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
811 if (++pte == GEN8_PTES) {
812 kunmap_px(ppgtt, pt_vaddr);
814 if (++pde == I915_PDES) {
815 if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
824 kunmap_px(ppgtt, pt_vaddr);
827 static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
828 struct sg_table *pages,
830 enum i915_cache_level cache_level,
833 struct i915_hw_ppgtt *ppgtt =
834 container_of(vm, struct i915_hw_ppgtt, base);
835 struct sg_page_iter sg_iter;
837 __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
839 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
840 gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
843 struct i915_page_directory_pointer *pdp;
845 uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
847 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
848 gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
854 static void gen8_free_page_tables(struct drm_device *dev,
855 struct i915_page_directory *pd)
862 for_each_set_bit(i, pd->used_pdes, I915_PDES) {
863 if (WARN_ON(!pd->page_table[i]))
866 free_pt(dev, pd->page_table[i]);
867 pd->page_table[i] = NULL;
871 static int gen8_init_scratch(struct i915_address_space *vm)
873 struct drm_device *dev = vm->dev;
875 vm->scratch_page = alloc_scratch_page(dev);
876 if (IS_ERR(vm->scratch_page))
877 return PTR_ERR(vm->scratch_page);
879 vm->scratch_pt = alloc_pt(dev);
880 if (IS_ERR(vm->scratch_pt)) {
881 free_scratch_page(dev, vm->scratch_page);
882 return PTR_ERR(vm->scratch_pt);
885 vm->scratch_pd = alloc_pd(dev);
886 if (IS_ERR(vm->scratch_pd)) {
887 free_pt(dev, vm->scratch_pt);
888 free_scratch_page(dev, vm->scratch_page);
889 return PTR_ERR(vm->scratch_pd);
892 if (USES_FULL_48BIT_PPGTT(dev)) {
893 vm->scratch_pdp = alloc_pdp(dev);
894 if (IS_ERR(vm->scratch_pdp)) {
895 free_pd(dev, vm->scratch_pd);
896 free_pt(dev, vm->scratch_pt);
897 free_scratch_page(dev, vm->scratch_page);
898 return PTR_ERR(vm->scratch_pdp);
902 gen8_initialize_pt(vm, vm->scratch_pt);
903 gen8_initialize_pd(vm, vm->scratch_pd);
904 if (USES_FULL_48BIT_PPGTT(dev))
905 gen8_initialize_pdp(vm, vm->scratch_pdp);
910 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
912 enum vgt_g2v_type msg;
913 struct drm_device *dev = ppgtt->base.dev;
914 struct drm_i915_private *dev_priv = dev->dev_private;
917 if (USES_FULL_48BIT_PPGTT(dev)) {
918 u64 daddr = px_dma(&ppgtt->pml4);
920 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
921 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
923 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
924 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
926 for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
927 u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
929 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
930 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
933 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
934 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
937 I915_WRITE(vgtif_reg(g2v_notify), msg);
942 static void gen8_free_scratch(struct i915_address_space *vm)
944 struct drm_device *dev = vm->dev;
946 if (USES_FULL_48BIT_PPGTT(dev))
947 free_pdp(dev, vm->scratch_pdp);
948 free_pd(dev, vm->scratch_pd);
949 free_pt(dev, vm->scratch_pt);
950 free_scratch_page(dev, vm->scratch_page);
953 static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
954 struct i915_page_directory_pointer *pdp)
958 for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
959 if (WARN_ON(!pdp->page_directory[i]))
962 gen8_free_page_tables(dev, pdp->page_directory[i]);
963 free_pd(dev, pdp->page_directory[i]);
969 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
973 for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
974 if (WARN_ON(!ppgtt->pml4.pdps[i]))
977 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
980 cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
983 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
985 struct i915_hw_ppgtt *ppgtt =
986 container_of(vm, struct i915_hw_ppgtt, base);
988 if (intel_vgpu_active(vm->dev))
989 gen8_ppgtt_notify_vgt(ppgtt, false);
991 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
992 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
994 gen8_ppgtt_cleanup_4lvl(ppgtt);
996 gen8_free_scratch(vm);
1000 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
1001 * @vm: Master vm structure.
1002 * @pd: Page directory for this address range.
1003 * @start: Starting virtual address to begin allocations.
1004 * @length: Size of the allocations.
1005 * @new_pts: Bitmap set by function with new allocations. Likely used by the
1006 * caller to free on error.
1008 * Allocate the required number of page tables. Extremely similar to
1009 * gen8_ppgtt_alloc_page_directories(). The main difference is that here we are
1010 * limited by the page directory boundary (instead of the page directory pointer).
1011 * That boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(),
1012 * it is possible, and likely, that the caller will need multiple calls of this
1013 * function to achieve the appropriate allocation.
1015 * Return: 0 if success; negative error code otherwise.
1017 static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
1018 struct i915_page_directory *pd,
1021 unsigned long *new_pts)
1023 struct drm_device *dev = vm->dev;
1024 struct i915_page_table *pt;
1027 gen8_for_each_pde(pt, pd, start, length, pde) {
1028 /* Don't reallocate page tables */
1029 if (test_bit(pde, pd->used_pdes)) {
1030 /* Scratch is never allocated this way */
1031 WARN_ON(pt == vm->scratch_pt);
1039 gen8_initialize_pt(vm, pt);
1040 pd->page_table[pde] = pt;
1041 __set_bit(pde, new_pts);
1042 trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
1048 for_each_set_bit(pde, new_pts, I915_PDES)
1049 free_pt(dev, pd->page_table[pde]);
1055 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
1056 * @vm: Master vm structure.
1057 * @pdp: Page directory pointer for this address range.
1058 * @start: Starting virtual address to begin allocations.
1059 * @length: Size of the allocations.
1060 * @new_pds: Bitmap set by function with new allocations. Likely used by the
1061 * caller to free on error.
1063 * Allocate the required number of page directories starting at the pdpe index of
1064 * @start, and ending at the pdpe index @start + @length. This function will skip
1065 * over already allocated page directories within the range, and only allocate
1066 * new ones, setting the appropriate pointer within the pdp as well as the
1067 * correct position in the bitmap @new_pds.
1069 * The function will only allocate the pages within the range for a given page
1070 * directory pointer. In other words, if @start + @length straddles a virtually
1071 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
1072 * required by the caller. This is not currently possible, and the BUG in the
1073 * code will prevent it.
1075 * Return: 0 if success; negative error code otherwise.
1078 gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
1079 struct i915_page_directory_pointer *pdp,
1082 unsigned long *new_pds)
1084 struct drm_device *dev = vm->dev;
1085 struct i915_page_directory *pd;
1087 uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1089 WARN_ON(!bitmap_empty(new_pds, pdpes));
1091 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1092 if (test_bit(pdpe, pdp->used_pdpes))
1099 gen8_initialize_pd(vm, pd);
1100 pdp->page_directory[pdpe] = pd;
1101 __set_bit(pdpe, new_pds);
1102 trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
1108 for_each_set_bit(pdpe, new_pds, pdpes)
1109 free_pd(dev, pdp->page_directory[pdpe]);
1115 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
1116 * @vm: Master vm structure.
1117 * @pml4: Page map level 4 for this address range.
1118 * @start: Starting virtual address to begin allocations.
1119 * @length: Size of the allocations.
1120 * @new_pdps: Bitmap set by function with new allocations. Likely used by the
1121 * caller to free on error.
1123 * Allocate the required number of page directory pointers. Extremely similar to
1124 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
1125 * The main difference is here we are limited by the pml4 boundary (instead of
1126 * the page directory pointer).
1128 * Return: 0 if success; negative error code otherwise.
1131 gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
1132 struct i915_pml4 *pml4,
1135 unsigned long *new_pdps)
1137 struct drm_device *dev = vm->dev;
1138 struct i915_page_directory_pointer *pdp;
1141 WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
1143 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1144 if (!test_bit(pml4e, pml4->used_pml4es)) {
1145 pdp = alloc_pdp(dev);
1149 gen8_initialize_pdp(vm, pdp);
1150 pml4->pdps[pml4e] = pdp;
1151 __set_bit(pml4e, new_pdps);
1152 trace_i915_page_directory_pointer_entry_alloc(vm,
1162 for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1163 free_pdp(dev, pml4->pdps[pml4e]);
1169 free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
1175 /* Fills in the page directory bitmap, and the array of page tables bitmap. Both
1176 * of these are based on the number of PDPEs in the system.
1179 int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
1180 unsigned long **new_pts,
1186 pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
1190 pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
1201 free_gen8_temp_bitmaps(pds, pts);
1205 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
1206 * the page table structures, we mark them dirty so that
1207 * context switching/execlist queuing code takes extra steps
1208 * to ensure that tlbs are flushed.
1210 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
1212 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
1215 static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
1216 struct i915_page_directory_pointer *pdp,
1220 struct i915_hw_ppgtt *ppgtt =
1221 container_of(vm, struct i915_hw_ppgtt, base);
1222 unsigned long *new_page_dirs, *new_page_tables;
1223 struct drm_device *dev = vm->dev;
1224 struct i915_page_directory *pd;
1225 const uint64_t orig_start = start;
1226 const uint64_t orig_length = length;
1228 uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1231 /* Wrap is never okay since we can only represent 48b, and we don't
1232 * actually use the other side of the canonical address space.
1234 if (WARN_ON(start + length < start))
1237 if (WARN_ON(start + length > vm->total))
1240 ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1244 /* Do the allocations first so we can easily bail out */
1245 ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
1248 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1252 /* For every page directory referenced, allocate page tables */
1253 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1254 ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
1255 new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
1261 length = orig_length;
1263 /* Allocations have completed successfully, so set the bitmaps, and do
1265 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1266 gen8_pde_t *const page_directory = kmap_px(pd);
1267 struct i915_page_table *pt;
1268 uint64_t pd_len = length;
1269 uint64_t pd_start = start;
1272 /* Every pd should be allocated, we just did that above. */
1275 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1276 /* Same reasoning as pd */
1278 if (pt == NULL) /* XXX dillon hack */
1279 continue; /* XXX dillon hack */
1281 WARN_ON(!gen8_pte_count(pd_start, pd_len));
1283 /* Set our used ptes within the page table */
1284 bitmap_set(pt->used_ptes,
1285 gen8_pte_index(pd_start),
1286 gen8_pte_count(pd_start, pd_len));
1288 /* Our pde is now pointing to the pagetable, pt */
1289 __set_bit(pde, pd->used_pdes);
1291 /* Map the PDE to the page table */
1292 page_directory[pde] = gen8_pde_encode(px_dma(pt),
1294 trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
1295 gen8_pte_index(start),
1296 gen8_pte_count(start, length),
1299 /* NB: We haven't yet mapped ptes to pages. At this
1300 * point we're still relying on insert_entries() */
1303 kunmap_px(ppgtt, page_directory);
1304 __set_bit(pdpe, pdp->used_pdpes);
1305 gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
1308 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1309 mark_tlbs_dirty(ppgtt);
1316 for_each_set_bit(temp, new_page_tables + pdpe *
1317 BITS_TO_LONGS(I915_PDES), I915_PDES)
1318 free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
1321 for_each_set_bit(pdpe, new_page_dirs, pdpes)
1322 free_pd(dev, pdp->page_directory[pdpe]);
1324 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1325 mark_tlbs_dirty(ppgtt);
1329 static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
1330 struct i915_pml4 *pml4,
1334 DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
1335 struct i915_hw_ppgtt *ppgtt =
1336 container_of(vm, struct i915_hw_ppgtt, base);
1337 struct i915_page_directory_pointer *pdp;
1341 /* Do the pml4 allocations first, so we don't need to track the newly
1342 * allocated tables below the pdp */
1343 bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
1345 /* The page directory and page table allocations are done in the shared 3
1346 * and 4 level code. Just allocate the pdps.
1348 ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
1353 WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
1354 "The allocation has spanned more than 512GB. "
1355 "It is highly likely this is incorrect.");
1357 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1360 ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
1364 gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
1367 bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
1368 GEN8_PML4ES_PER_PML4);
1373 for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1374 gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
1379 static int gen8_alloc_va_range(struct i915_address_space *vm,
1380 uint64_t start, uint64_t length)
1382 struct i915_hw_ppgtt *ppgtt =
1383 container_of(vm, struct i915_hw_ppgtt, base);
1385 if (USES_FULL_48BIT_PPGTT(vm->dev))
1386 return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
1388 return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
1391 static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
1392 uint64_t start, uint64_t length,
1393 gen8_pte_t scratch_pte,
1396 struct i915_page_directory *pd;
1399 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1400 struct i915_page_table *pt;
1401 uint64_t pd_len = length;
1402 uint64_t pd_start = start;
1405 if (!test_bit(pdpe, pdp->used_pdpes))
1408 seq_printf(m, "\tPDPE #%d\n", pdpe);
1409 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1411 gen8_pte_t *pt_vaddr;
1413 if (!test_bit(pde, pd->used_pdes))
1416 pt_vaddr = kmap_px(pt);
1417 for (pte = 0; pte < GEN8_PTES; pte += 4) {
1419 (pdpe << GEN8_PDPE_SHIFT) |
1420 (pde << GEN8_PDE_SHIFT) |
1421 (pte << GEN8_PTE_SHIFT);
1425 for (i = 0; i < 4; i++)
1426 if (pt_vaddr[pte + i] != scratch_pte)
1431 seq_printf(m, "\t\t0x%lx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1432 for (i = 0; i < 4; i++) {
1433 if (pt_vaddr[pte + i] != scratch_pte)
1434 seq_printf(m, " %lx", pt_vaddr[pte + i]);
1436 seq_puts(m, " SCRATCH ");
1440 /* don't use kunmap_px, it could trigger
1441 * an unnecessary flush.
1443 kunmap_atomic(pt_vaddr);
1448 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1450 struct i915_address_space *vm = &ppgtt->base;
1451 uint64_t start = ppgtt->base.start;
1452 uint64_t length = ppgtt->base.total;
1453 gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
1454 I915_CACHE_LLC, true);
1456 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
1457 gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
1460 struct i915_pml4 *pml4 = &ppgtt->pml4;
1461 struct i915_page_directory_pointer *pdp;
1463 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1464 if (!test_bit(pml4e, pml4->used_pml4es))
1467 seq_printf(m, " PML4E #%lu\n", pml4e);
1468 gen8_dump_pdp(pdp, start, length, scratch_pte, m);
1473 static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
1475 unsigned long *new_page_dirs, *new_page_tables;
1476 uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1479 /* We allocate a temp bitmap for page tables for no gain,
1480 * but as this is for init only, let's keep things simple
1482 ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1486 /* Allocate for all pdps regardless of how the ppgtt
1489 ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
1493 *ppgtt->pdp.used_pdpes = *new_page_dirs;
1495 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1501 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP registers
1502 * with a net effect resembling a 2-level page table in normal x86 terms. Each
1503 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB legacy 32b address
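/*
 * Worked example (sketch, using the GEN8_*_SHIFT values used elsewhere in
 * this file; 'addr' is a hypothetical GPU virtual address): a 32b legacy
 * address splits into 2 bits of PDPE, 9 bits of PDE, 9 bits of PTE and a
 * 12 bit page offset, which is where 4 * 512 * 512 * 4096 = 4GB above
 * comes from.
 */
#if 0
	unsigned pdpe = gen8_pdpe_index(addr);	/* (addr >> 30) & 0x3   */
	unsigned pde  = gen8_pde_index(addr);	/* (addr >> 21) & 0x1ff */
	unsigned pte  = gen8_pte_index(addr);	/* (addr >> 12) & 0x1ff */
#endif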
1507 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1511 ret = gen8_init_scratch(&ppgtt->base);
1515 ppgtt->base.start = 0;
1516 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1517 ppgtt->base.allocate_va_range = gen8_alloc_va_range;
1518 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
1519 ppgtt->base.clear_range = gen8_ppgtt_clear_range;
1520 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1521 ppgtt->base.bind_vma = ppgtt_bind_vma;
1522 ppgtt->debug_dump = gen8_dump_ppgtt;
1524 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
1525 ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
1529 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1531 ppgtt->base.total = 1ULL << 48;
1532 ppgtt->switch_mm = gen8_48b_mm_switch;
1534 ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
1538 ppgtt->base.total = 1ULL << 32;
1539 ppgtt->switch_mm = gen8_legacy_mm_switch;
1540 trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
1544 if (intel_vgpu_active(ppgtt->base.dev)) {
1545 ret = gen8_preallocate_top_level_pdps(ppgtt);
1551 if (intel_vgpu_active(ppgtt->base.dev))
1552 gen8_ppgtt_notify_vgt(ppgtt, true);
1557 gen8_free_scratch(&ppgtt->base);
1561 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1563 struct i915_address_space *vm = &ppgtt->base;
1564 struct i915_page_table *unused;
1565 gen6_pte_t scratch_pte;
1567 uint32_t pte, pde, temp;
1568 uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
1570 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1571 I915_CACHE_LLC, true, 0);
1573 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
1575 gen6_pte_t *pt_vaddr;
1576 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
1577 pd_entry = readl(ppgtt->pd_addr + pde);
1578 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1580 if (pd_entry != expected)
1581 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1585 seq_printf(m, "\tPDE: %x\n", pd_entry);
1587 pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
1589 for (pte = 0; pte < GEN6_PTES; pte+=4) {
1591 (pde * PAGE_SIZE * GEN6_PTES) +
1595 for (i = 0; i < 4; i++)
1596 if (pt_vaddr[pte + i] != scratch_pte)
1601 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1602 for (i = 0; i < 4; i++) {
1603 if (pt_vaddr[pte + i] != scratch_pte)
1604 seq_printf(m, " %08x", pt_vaddr[pte + i]);
1606 seq_puts(m, " SCRATCH ");
1610 kunmap_px(ppgtt, pt_vaddr);
1614 /* Write pde (index) from the page directory @pd to the page table @pt */
1615 static void gen6_write_pde(struct i915_page_directory *pd,
1616 const int pde, struct i915_page_table *pt)
1618 /* Caller needs to make sure the write completes if necessary */
1619 struct i915_hw_ppgtt *ppgtt =
1620 container_of(pd, struct i915_hw_ppgtt, pd);
1623 pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
1624 pd_entry |= GEN6_PDE_VALID;
1626 writel(pd_entry, ppgtt->pd_addr + pde);
1629 /* Write all the page tables found in the ppgtt structure to incrementing page directories */
1631 static void gen6_write_page_range(struct drm_i915_private *dev_priv,
1632 struct i915_page_directory *pd,
1633 uint32_t start, uint32_t length)
1635 struct i915_page_table *pt;
1638 gen6_for_each_pde(pt, pd, start, length, temp, pde)
1639 gen6_write_pde(pd, pde, pt);
1641 /* Make sure write is complete before other code can use this page
1642 * table. Also required for WC mapped PTEs */
1643 readl(dev_priv->gtt.gsm);
1646 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
1648 BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1650 return (ppgtt->pd.base.ggtt_offset / 64) << 16;
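/* Worked example (sketch): the offset is expressed in 64 byte units and,
 * reading the shift above, lands in the upper half of the register, so a
 * page directory at GGTT offset 0x40000 yields
 * (0x40000 / 64) << 16 == 0x10000000.
 */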
1653 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1654 struct drm_i915_gem_request *req)
1656 struct intel_engine_cs *ring = req->ring;
1659 /* NB: TLBs must be flushed and invalidated before a switch */
1660 ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1664 ret = intel_ring_begin(req, 6);
1668 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1669 intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
1670 intel_ring_emit(ring, PP_DIR_DCLV_2G);
1671 intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
1672 intel_ring_emit(ring, get_pd_offset(ppgtt));
1673 intel_ring_emit(ring, MI_NOOP);
1674 intel_ring_advance(ring);
1679 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
1680 struct drm_i915_gem_request *req)
1682 struct intel_engine_cs *ring = req->ring;
1683 struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
1685 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
1686 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
1690 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1691 struct drm_i915_gem_request *req)
1693 struct intel_engine_cs *ring = req->ring;
1696 /* NB: TLBs must be flushed and invalidated before a switch */
1697 ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1701 ret = intel_ring_begin(req, 6);
1705 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1706 intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
1707 intel_ring_emit(ring, PP_DIR_DCLV_2G);
1708 intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
1709 intel_ring_emit(ring, get_pd_offset(ppgtt));
1710 intel_ring_emit(ring, MI_NOOP);
1711 intel_ring_advance(ring);
1713 /* XXX: RCS is the only one to auto invalidate the TLBs? */
1714 if (ring->id != RCS) {
1715 ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1723 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1724 struct drm_i915_gem_request *req)
1726 struct intel_engine_cs *ring = req->ring;
1727 struct drm_device *dev = ppgtt->base.dev;
1728 struct drm_i915_private *dev_priv = dev->dev_private;
1731 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
1732 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
1734 POSTING_READ(RING_PP_DIR_DCLV(ring));
1739 static void gen8_ppgtt_enable(struct drm_device *dev)
1741 struct drm_i915_private *dev_priv = dev->dev_private;
1742 struct intel_engine_cs *ring;
1745 for_each_ring(ring, dev_priv, j) {
1746 u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
1747 I915_WRITE(RING_MODE_GEN7(ring),
1748 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1752 static void gen7_ppgtt_enable(struct drm_device *dev)
1754 struct drm_i915_private *dev_priv = dev->dev_private;
1755 struct intel_engine_cs *ring;
1756 uint32_t ecochk, ecobits;
1759 ecobits = I915_READ(GAC_ECO_BITS);
1760 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1762 ecochk = I915_READ(GAM_ECOCHK);
1763 if (IS_HASWELL(dev)) {
1764 ecochk |= ECOCHK_PPGTT_WB_HSW;
1766 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1767 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1769 I915_WRITE(GAM_ECOCHK, ecochk);
1771 for_each_ring(ring, dev_priv, i) {
1772 /* GFX_MODE is per-ring on gen7+ */
1773 I915_WRITE(RING_MODE_GEN7(ring),
1774 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1778 static void gen6_ppgtt_enable(struct drm_device *dev)
1780 struct drm_i915_private *dev_priv = dev->dev_private;
1781 uint32_t ecochk, gab_ctl, ecobits;
1783 ecobits = I915_READ(GAC_ECO_BITS);
1784 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1785 ECOBITS_PPGTT_CACHE64B);
1787 gab_ctl = I915_READ(GAB_CTL);
1788 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1790 ecochk = I915_READ(GAM_ECOCHK);
1791 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1793 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1796 /* PPGTT support for Sandybridge/Gen6 and later */
1797 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1802 struct i915_hw_ppgtt *ppgtt =
1803 container_of(vm, struct i915_hw_ppgtt, base);
1804 gen6_pte_t *pt_vaddr, scratch_pte;
1805 unsigned first_entry = start >> PAGE_SHIFT;
1806 unsigned num_entries = length >> PAGE_SHIFT;
1807 unsigned act_pt = first_entry / GEN6_PTES;
1808 unsigned first_pte = first_entry % GEN6_PTES;
1809 unsigned last_pte, i;
1811 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1812 I915_CACHE_LLC, true, 0);
1814 while (num_entries) {
1815 last_pte = first_pte + num_entries;
1816 if (last_pte > GEN6_PTES)
1817 last_pte = GEN6_PTES;
1819 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1821 for (i = first_pte; i < last_pte; i++)
1822 pt_vaddr[i] = scratch_pte;
1824 kunmap_px(ppgtt, pt_vaddr);
1826 num_entries -= last_pte - first_pte;
1832 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1833 struct sg_table *pages,
1835 enum i915_cache_level cache_level, u32 flags)
1837 struct i915_hw_ppgtt *ppgtt =
1838 container_of(vm, struct i915_hw_ppgtt, base);
1839 gen6_pte_t *pt_vaddr;
1840 unsigned first_entry = start >> PAGE_SHIFT;
1841 unsigned act_pt = first_entry / GEN6_PTES;
1842 unsigned act_pte = first_entry % GEN6_PTES;
1843 struct sg_page_iter sg_iter;
1846 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
1847 if (pt_vaddr == NULL)
1848 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1851 vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
1852 cache_level, true, flags);
1854 if (++act_pte == GEN6_PTES) {
1855 kunmap_px(ppgtt, pt_vaddr);
1862 kunmap_px(ppgtt, pt_vaddr);
1865 static int gen6_alloc_va_range(struct i915_address_space *vm,
1866 uint64_t start_in, uint64_t length_in)
1868 DECLARE_BITMAP(new_page_tables, I915_PDES);
1869 struct drm_device *dev = vm->dev;
1870 struct drm_i915_private *dev_priv = dev->dev_private;
1871 struct i915_hw_ppgtt *ppgtt =
1872 container_of(vm, struct i915_hw_ppgtt, base);
1873 struct i915_page_table *pt;
1874 uint32_t start, length, start_save, length_save;
1878 if (WARN_ON(start_in + length_in > ppgtt->base.total))
1881 start = start_save = start_in;
1882 length = length_save = length_in;
1884 bitmap_zero(new_page_tables, I915_PDES);
1886 /* The allocation is done in two stages so that we can bail out with a
1887 * minimal amount of pain. The first stage finds new page tables that
1888 * need allocation. The second stage marks the used ptes within the page
1891 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1892 if (pt != vm->scratch_pt) {
1893 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
1897 /* We've already allocated a page table */
1898 WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
1906 gen6_initialize_pt(vm, pt);
1908 ppgtt->pd.page_table[pde] = pt;
1909 __set_bit(pde, new_page_tables);
1910 trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
1914 length = length_save;
1916 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1917 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
1919 bitmap_zero(tmp_bitmap, GEN6_PTES);
1920 bitmap_set(tmp_bitmap, gen6_pte_index(start),
1921 gen6_pte_count(start, length));
1923 if (__test_and_clear_bit(pde, new_page_tables))
1924 gen6_write_pde(&ppgtt->pd, pde, pt);
1926 trace_i915_page_table_entry_map(vm, pde, pt,
1927 gen6_pte_index(start),
1928 gen6_pte_count(start, length),
1930 bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
1934 WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
1936 /* Make sure write is complete before other code can use this page
1937 * table. Also required for WC mapped PTEs */
1938 readl(dev_priv->gtt.gsm);
1940 mark_tlbs_dirty(ppgtt);
1944 for_each_set_bit(pde, new_page_tables, I915_PDES) {
1945 struct i915_page_table *pt = ppgtt->pd.page_table[pde];
1947 ppgtt->pd.page_table[pde] = vm->scratch_pt;
1948 free_pt(vm->dev, pt);
1951 mark_tlbs_dirty(ppgtt);
1955 static int gen6_init_scratch(struct i915_address_space *vm)
1957 struct drm_device *dev = vm->dev;
1959 vm->scratch_page = alloc_scratch_page(dev);
1960 if (IS_ERR(vm->scratch_page))
1961 return PTR_ERR(vm->scratch_page);
1963 vm->scratch_pt = alloc_pt(dev);
1964 if (IS_ERR(vm->scratch_pt)) {
1965 free_scratch_page(dev, vm->scratch_page);
1966 return PTR_ERR(vm->scratch_pt);
1969 gen6_initialize_pt(vm, vm->scratch_pt);
1974 static void gen6_free_scratch(struct i915_address_space *vm)
1976 struct drm_device *dev = vm->dev;
1978 free_pt(dev, vm->scratch_pt);
1979 free_scratch_page(dev, vm->scratch_page);
1982 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1984 struct i915_hw_ppgtt *ppgtt =
1985 container_of(vm, struct i915_hw_ppgtt, base);
1986 struct i915_page_table *pt;
1989 drm_mm_remove_node(&ppgtt->node);
1991 gen6_for_all_pdes(pt, ppgtt, pde) {
1992 if (pt != vm->scratch_pt)
1993 free_pt(ppgtt->base.dev, pt);
1996 gen6_free_scratch(vm);
1999 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
2001 struct i915_address_space *vm = &ppgtt->base;
2002 struct drm_device *dev = ppgtt->base.dev;
2003 struct drm_i915_private *dev_priv = dev->dev_private;
2004 bool retried = false;
2007 /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
2008 * allocator works in address space sizes, so it's multiplied by page
2009 * size. We allocate at the top of the GTT to avoid fragmentation.
2011 BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
2013 ret = gen6_init_scratch(vm);
2018 ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
2019 &ppgtt->node, GEN6_PD_SIZE,
2021 0, dev_priv->gtt.base.total,
2023 if (ret == -ENOSPC && !retried) {
2024 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
2025 GEN6_PD_SIZE, GEN6_PD_ALIGN,
2027 0, dev_priv->gtt.base.total,
2040 if (ppgtt->node.start < dev_priv->gtt.mappable_end)
2041 DRM_DEBUG("Forced to use aperture for PDEs\n");
2046 gen6_free_scratch(vm);
2050 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2052 return gen6_ppgtt_allocate_page_directories(ppgtt);
2055 static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2056 uint64_t start, uint64_t length)
2058 struct i915_page_table *unused;
2061 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
2062 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
2065 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
2067 struct drm_device *dev = ppgtt->base.dev;
2068 struct drm_i915_private *dev_priv = dev->dev_private;
2071 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
2073 ppgtt->switch_mm = gen6_mm_switch;
2074 } else if (IS_HASWELL(dev)) {
2075 ppgtt->switch_mm = hsw_mm_switch;
2076 } else if (IS_GEN7(dev)) {
2077 ppgtt->switch_mm = gen7_mm_switch;
2081 if (intel_vgpu_active(dev))
2082 ppgtt->switch_mm = vgpu_mm_switch;
2084 ret = gen6_ppgtt_alloc(ppgtt);
2088 ppgtt->base.allocate_va_range = gen6_alloc_va_range;
2089 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2090 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
2091 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2092 ppgtt->base.bind_vma = ppgtt_bind_vma;
2093 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
2094 ppgtt->base.start = 0;
2095 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
2096 ppgtt->debug_dump = gen6_dump_ppgtt;
2098 ppgtt->pd.base.ggtt_offset =
2099 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
2101 ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
2102 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
2104 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
2106 gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
2108 DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
2109 ppgtt->node.size >> 20,
2110 ppgtt->node.start / PAGE_SIZE);
2112 DRM_DEBUG("Adding PPGTT at offset %x\n",
2113 ppgtt->pd.base.ggtt_offset << 10);
2118 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2120 ppgtt->base.dev = dev;
2122 if (INTEL_INFO(dev)->gen < 8)
2123 return gen6_ppgtt_init(ppgtt);
2125 return gen8_ppgtt_init(ppgtt);
2128 static void i915_address_space_init(struct i915_address_space *vm,
2129 struct drm_i915_private *dev_priv)
2131 drm_mm_init(&vm->mm, vm->start, vm->total);
2132 vm->dev = dev_priv->dev;
2133 INIT_LIST_HEAD(&vm->active_list);
2134 INIT_LIST_HEAD(&vm->inactive_list);
2135 list_add_tail(&vm->global_link, &dev_priv->vm_list);
2138 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2140 struct drm_i915_private *dev_priv = dev->dev_private;
2143 ret = __hw_ppgtt_init(dev, ppgtt);
2145 kref_init(&ppgtt->ref);
2146 i915_address_space_init(&ppgtt->base, dev_priv);
2152 int i915_ppgtt_init_hw(struct drm_device *dev)
2154 /* In the case of execlists, PPGTT is enabled by the context descriptor
2155 * and the PDPs are contained within the context itself. We don't
2156 * need to do anything here. */
2157 if (i915.enable_execlists)
2160 if (!USES_PPGTT(dev))
2164 gen6_ppgtt_enable(dev);
2165 else if (IS_GEN7(dev))
2166 gen7_ppgtt_enable(dev);
2167 else if (INTEL_INFO(dev)->gen >= 8)
2168 gen8_ppgtt_enable(dev);
2170 MISSING_CASE(INTEL_INFO(dev)->gen);
2175 int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
2177 struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
2178 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2180 if (i915.enable_execlists)
2186 return ppgtt->switch_mm(ppgtt, req);
2189 struct i915_hw_ppgtt *
2190 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
2192 struct i915_hw_ppgtt *ppgtt;
2195 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2197 return ERR_PTR(-ENOMEM);
2199 ret = i915_ppgtt_init(dev, ppgtt);
2202 return ERR_PTR(ret);
2205 ppgtt->file_priv = fpriv;
2207 trace_i915_ppgtt_create(&ppgtt->base);
2212 void i915_ppgtt_release(struct kref *kref)
2214 struct i915_hw_ppgtt *ppgtt =
2215 container_of(kref, struct i915_hw_ppgtt, ref);
2217 trace_i915_ppgtt_release(&ppgtt->base);
2219 /* vmas should already be unbound */
2220 WARN_ON(!list_empty(&ppgtt->base.active_list));
2221 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
2223 list_del(&ppgtt->base.global_link);
2224 drm_mm_takedown(&ppgtt->base.mm);
2226 ppgtt->base.cleanup(&ppgtt->base);
2230 extern int intel_iommu_gfx_mapped;
2231 /* Certain Gen5 chipsets require idling the GPU before
2232 * unmapping anything from the GTT when VT-d is enabled.
2234 static bool needs_idle_maps(struct drm_device *dev)
2236 #ifdef CONFIG_INTEL_IOMMU
2237 /* Query intel_iommu to see if we need the workaround. Presumably that was loaded first. */
2240 if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
2246 static bool do_idling(struct drm_i915_private *dev_priv)
2248 bool ret = dev_priv->mm.interruptible;
2250 if (unlikely(dev_priv->gtt.do_idle_maps)) {
2251 dev_priv->mm.interruptible = false;
2252 if (i915_gpu_idle(dev_priv->dev)) {
2253 DRM_ERROR("Couldn't idle GPU\n");
2254 /* Wait a bit, in hopes it avoids the hang */
2262 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
2264 if (unlikely(dev_priv->gtt.do_idle_maps))
2265 dev_priv->mm.interruptible = interruptible;
2268 void i915_check_and_clear_faults(struct drm_device *dev)
2270 struct drm_i915_private *dev_priv = dev->dev_private;
2271 struct intel_engine_cs *ring;
2274 if (INTEL_INFO(dev)->gen < 6)
2277 for_each_ring(ring, dev_priv, i) {
2279 fault_reg = I915_READ(RING_FAULT_REG(ring));
2280 if (fault_reg & RING_FAULT_VALID) {
2282 DRM_DEBUG_DRIVER("Unexpected fault\n"
2284 "\tAddress space: %s\n"
2287 fault_reg & PAGE_MASK,
2288 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2289 RING_FAULT_SRCID(fault_reg),
2290 RING_FAULT_FAULT_TYPE(fault_reg));
2292 I915_WRITE(RING_FAULT_REG(ring),
2293 fault_reg & ~RING_FAULT_VALID);
2296 POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
2299 static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
2301 if (INTEL_INFO(dev_priv->dev)->gen < 6) {
2302 intel_gtt_chipset_flush();
2304 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2305 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2309 void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
2311 struct drm_i915_private *dev_priv = dev->dev_private;
2313 /* Don't bother messing with faults pre GEN6 as we have little
2314 * documentation supporting that it's a good idea.
2316 if (INTEL_INFO(dev)->gen < 6)
2319 i915_check_and_clear_faults(dev);
2321 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
2322 dev_priv->gtt.base.start,
2323 dev_priv->gtt.base.total,
2326 i915_ggtt_flush(dev_priv);
2329 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
2331 if (!dma_map_sg(&obj->base.dev->pdev->dev,
2332 obj->pages->sgl, obj->pages->nents,
2333 PCI_DMA_BIDIRECTIONAL))
2339 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2344 iowrite32((u32)pte, addr);
2345 iowrite32(pte >> 32, addr + 4);
2349 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2350 struct sg_table *st,
2352 enum i915_cache_level level, u32 unused)
2354 struct drm_i915_private *dev_priv = vm->dev->dev_private;
2355 unsigned first_entry = start >> PAGE_SHIFT;
2356 gen8_pte_t __iomem *gtt_entries =
2357 (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
2359 struct sg_page_iter sg_iter;
2360 dma_addr_t addr = 0; /* shut up gcc */
2363 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2365 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
2366 addr = sg_dma_address(sg_iter.sg) +
2367 (sg_iter.sg_pgoffset << PAGE_SHIFT);
2368 gen8_set_pte(&gtt_entries[i],
2369 gen8_pte_encode(addr, level, true));
2374 * XXX: This serves as a posting read to make sure that the PTE has
2375 * actually been updated. There is some concern that even though the
2376 * registers and PTEs are within the same BAR, they may be subject to
2377 * different NUMA access patterns. Therefore, even with the way we assume
2378 * hardware should work, we must keep this posting read for paranoia.
2381 WARN_ON(readq(&gtt_entries[i-1])
2382 != gen8_pte_encode(addr, level, true));
2384 /* This next bit makes the above posting read even more important. We
2385 * want to flush the TLBs only after we're certain all the PTE updates
2388 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2389 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2391 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
struct insert_entries {
	struct i915_address_space *vm;
	struct sg_table *st;
	uint64_t start;
	enum i915_cache_level level;
	u32 flags;
};

static int gen8_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;
	gen8_ggtt_insert_entries(arg->vm, arg->st,
				 arg->start, arg->level, arg->flags);
	return 0;
}

static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					  struct sg_table *st,
					  uint64_t start,
					  enum i915_cache_level level,
					  u32 flags)
{
	struct insert_entries arg = { vm, st, start, level, flags };
#ifndef __DragonFly__
	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
#else
	/* XXX: is this enough ?
	 * See Linux commit 5bab6f60cb4d1417ad7c599166bcfec87529c1a2 */
	get_mplock();
	gen8_ggtt_insert_entries__cb(&arg);
	rel_mplock();
#endif
}
/*
 * Binds an object into the global gtt with the specified cache level. The
 * object will be accessible to the GPU via commands whose operands reference
 * offsets within the global GTT as well as accessible by the GPU through the
 * GMADR mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 flags)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen6_pte_t __iomem *gtt_entries =
		(gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr = 0;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR, they may be subject
	 * to different (NUMA-like) access patterns. Therefore, even with the
	 * way we assume hardware should work, we must keep this posting read
	 * for paranoia.
	 */
	if (i != 0) {
		unsigned long gtt = readl(&gtt_entries[i-1]);
		WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
	}

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
				      I915_CACHE_LLC,
				      use_scratch);
	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
	readl(gtt_base);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, use_scratch, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *pages,
				     uint64_t start,
				     enum i915_cache_level cache_level, u32 unused)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool unused)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	intel_gtt_clear_range(first_entry, num_entries);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
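
/*
 * VMA bind/unbind callbacks: binding resolves the pages backing the
 * requested GGTT view and writes PTEs through vm->insert_entries;
 * unbinding points the range back at the scratch page.
 */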
static int ggtt_bind_vma(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags = 0;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
				vma->node.start,
				cache_level, pte_flags);

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma->bound |= GLOBAL_BIND | LOCAL_BIND;

	return 0;
}
static int aliasing_gtt_bind_vma(struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;
	struct sg_table *pages = obj->pages;
	u32 pte_flags = 0;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;
	pages = vma->ggtt_view.pages;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	if (flags & GLOBAL_BIND) {
		vma->vm->insert_entries(vma->vm, pages,
					vma->node.start,
					cache_level, pte_flags);
	}

	if (flags & LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.insert_entries(&appgtt->base, pages,
					    vma->node.start,
					    cache_level, pte_flags);
	}

	return 0;
}
static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;
	const uint64_t size = min_t(uint64_t,
				    obj->base.size,
				    vma->node.size);

	if (vma->bound & GLOBAL_BIND) {
		vma->vm->clear_range(vma->vm,
				     vma->node.start, size,
				     true);
	}

	if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;

		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start, size,
					 true);
	}
}
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
		     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}
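
/*
 * drm_mm color callback: keep at least one page of separation between
 * nodes of different cache "colors" so conflicting cacheability settings
 * never end up in adjacent PTEs. Hooked up from i915_gem_setup_global_gtt()
 * below on platforms without an LLC.
 */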
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  u64 *start, u64 *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node, node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
static int i915_gem_setup_global_gtt(struct drm_device *dev,
				     u64 start,
				     u64 mappable_end,
				     u64 end)
{
	/* Let GEM manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;
	int ret;
	unsigned long mappable;
	int error;

	mappable = min(end, mappable_end) - start;
	BUG_ON(mappable_end > end);

	ggtt_vm->start = start;

	/* Subtract the guard page before address space initialization to
	 * shrink the range used by drm_mm */
	ggtt_vm->total = end - start - PAGE_SIZE;
	i915_address_space_init(ggtt_vm, dev_priv);
	ggtt_vm->total += PAGE_SIZE;

	if (intel_vgpu_active(dev)) {
		ret = intel_vgt_balloon(dev);
		if (ret)
			return ret;
	}

	if (!HAS_LLC(dev))
		ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);

		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
			return ret;
		}
		vma->bound |= GLOBAL_BIND;
		__i915_vma_set_map_and_fenceable(vma);
		list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
	}

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt_vm->clear_range(ggtt_vm, hole_start,
				     hole_end - hole_start, true);
	}

#ifdef __DragonFly__
	device_printf(dev->dev->bsddev,
		      "taking over the fictitious range 0x%lx-0x%lx\n",
		      dev_priv->gtt.mappable_base + start,
		      dev_priv->gtt.mappable_base + start + mappable);
	error = -vm_phys_fictitious_reg_range(dev_priv->gtt.mappable_base + start,
	    dev_priv->gtt.mappable_base + start + mappable,
	    VM_MEMATTR_WRITE_COMBINING);
#endif

	/* And finally clear the reserved guard page */
	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);

	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
		if (!ppgtt)
			return -ENOMEM;

		ret = __hw_ppgtt_init(dev, ppgtt);
		if (ret) {
			ppgtt->base.cleanup(&ppgtt->base);
			kfree(ppgtt);
			return ret;
		}

		if (ppgtt->base.allocate_va_range)
			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
							    ppgtt->base.total);
		if (ret) {
			ppgtt->base.cleanup(&ppgtt->base);
			kfree(ppgtt);
			return ret;
		}

		ppgtt->base.clear_range(&ppgtt->base,
					ppgtt->base.start,
					ppgtt->base.total,
					true);

		dev_priv->mm.aliasing_ppgtt = ppgtt;
		WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
		dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
	}

	return 0;
}
void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.base.total;
	mappable_size = dev_priv->gtt.mappable_end;

	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
void i915_global_gtt_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;

	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		ppgtt->base.cleanup(&ppgtt->base);
	}

	if (drm_mm_initialized(&vm->mm)) {
		if (intel_vgpu_active(dev))
			intel_vgt_deballoon();

		drm_mm_takedown(&vm->mm);
		list_del(&vm->global_link);
	}

	vm->cleanup(vm);
}
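
/*
 * The GMCH control word packs the two fields we care about: GGMS (the
 * size of the GTT itself) and GMS (the amount of stolen memory). The
 * encodings changed across generations, hence one decode helper per
 * platform below.
 */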
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}
static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
}

static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments starting at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
}

static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;

	if (gen9_gmch_ctl < 0xf0)
		return gen9_gmch_ctl << 25; /* 32 MB units */
	else
		/* 4MB increments starting at 0xf0 for 4MB */
		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}
static int ggtt_probe_common(struct drm_device *dev,
			     size_t gtt_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_page_scratch *scratch_page;
	phys_addr_t gtt_phys_addr;

	/* For modern GENs the PTEs and register space are split in the BAR */
	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	/*
	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
	 * dropped. For WC mappings in general we have 64 byte burst writes
	 * when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_BROXTON(dev))
		dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
	else
		dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	scratch_page = alloc_scratch_page(dev);
	if (IS_ERR(scratch_page)) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(dev_priv->gtt.gsm);
		return PTR_ERR(scratch_page);
	}

	dev_priv->gtt.base.scratch_page = scratch_page;

	return 0;
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	if (!USES_PPGTT(dev_priv->dev))
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
		pat = GEN8_PPAT(0, GEN8_PPAT_UC);

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}
static int gen8_gmch_probe(struct drm_device *dev,
			   u64 *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   u64 *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));

	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	if (INTEL_INFO(dev)->gen >= 9) {
		*stolen = gen9_get_stolen_size(snb_gmch_ctl);
		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	} else if (IS_CHERRYVIEW(dev)) {
		*stolen = chv_get_stolen_size(snb_gmch_ctl);
		gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
	} else {
		*stolen = gen8_get_stolen_size(snb_gmch_ctl);
		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	}

	*gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;

	if (IS_CHERRYVIEW(dev_priv))
		dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;

	return ret;
}
static int gen6_gmch_probe(struct drm_device *dev,
			   u64 *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   u64 *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));

	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;

	return ret;
}
static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);

	iounmap(gtt->gsm);
	free_scratch_page(vm->dev, vm->scratch_page);
}
static int i915_gmch_probe(struct drm_device *dev,
			   u64 *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   u64 *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;

	if (unlikely(dev_priv->gtt.do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}
static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}
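
/*
 * Top-level GTT probe: selects the probe/cleanup implementations and the
 * PTE encode function for this generation. gen <= 5 goes through the
 * legacy intel-gtt/GMCH path, while gen6/7 and gen8+ each get their own
 * probe routines.
 */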
int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;
		gtt->base.cleanup = i915_gmch_remove;
	} else if (INTEL_INFO(dev)->gen < 8) {
		gtt->gtt_probe = gen6_gmch_probe;
		gtt->base.cleanup = gen6_gmch_remove;
		if (IS_HASWELL(dev) && dev_priv->ellc_size)
			gtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			gtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			gtt->base.pte_encode = byt_pte_encode;
		else if (INTEL_INFO(dev)->gen >= 7)
			gtt->base.pte_encode = ivb_pte_encode;
		else
			gtt->base.pte_encode = snb_pte_encode;
	} else {
		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
	}

	gtt->base.dev = dev;

	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
	if (ret)
		return ret;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %luM\n",
		 gtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif
	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	return 0;
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool flush;

	i915_check_and_clear_faults(dev);

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start,
				       dev_priv->gtt.base.total,
				       true);

	/* Cache flush objects bound into GGTT and rebind them. */
	vm = &dev_priv->gtt.base;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		flush = false;
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			if (vma->vm != vm)
				continue;

			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));

			flush = true;
		}

		if (flush)
			i915_gem_clflush_object(obj, obj->pin_display);
	}

	if (INTEL_INFO(dev)->gen >= 8) {
		if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

	if (USES_PPGTT(dev)) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			/* TODO: Perhaps it shouldn't be gen6 specific */

			struct i915_hw_ppgtt *ppgtt =
					container_of(vm, struct i915_hw_ppgtt,
						     base);

			if (i915_is_ggtt(vm))
				ppgtt = dev_priv->mm.aliasing_ppgtt;

			gen6_write_page_range(dev_priv, &ppgtt->pd,
					      0, ppgtt->base.total);
		}
	}

	i915_ggtt_flush(dev_priv);
}
static struct i915_vma *
__i915_gem_vma_create(struct drm_i915_gem_object *obj,
		      struct i915_address_space *vm,
		      const struct i915_ggtt_view *ggtt_view)
{
	struct i915_vma *vma;

	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
		return ERR_PTR(-EINVAL);

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->vma_link);
	INIT_LIST_HEAD(&vma->mm_list);
	INIT_LIST_HEAD(&vma->exec_list);
	vma->vm = vm;
	vma->obj = obj;

	if (i915_is_ggtt(vm))
		vma->ggtt_view = *ggtt_view;

	list_add_tail(&vma->vma_link, &obj->vma_list);
	if (!i915_is_ggtt(vm))
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));

	return vma;
}
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm)
{
	struct i915_vma *vma;

	vma = i915_gem_obj_to_vma(obj, vm);
	if (!vma)
		vma = __i915_gem_vma_create(obj, vm,
					    i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);

	return vma;
}
struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
				       const struct i915_ggtt_view *view)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
	struct i915_vma *vma;

	if (WARN_ON(!view))
		return ERR_PTR(-EINVAL);

	vma = i915_gem_obj_to_ggtt_view(obj, view);

	if (IS_ERR(vma))
		return vma;

	if (!vma)
		vma = __i915_gem_vma_create(obj, ggtt, view);

	return vma;
}
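
/*
 * Emit one sg entry per page, walking each source column from the bottom
 * row upwards (src_idx starts at width * (height - 1) + column and steps
 * back by width), which lays the pages out rotated by 90 degrees. Only
 * DMA addresses are filled in; page pointers stay NULL because the GGTT
 * needs nothing else.
 */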
static struct scatterlist *
rotate_pages(dma_addr_t *in, unsigned int offset,
	     unsigned int width, unsigned int height,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	if (!sg) {
		st->nents = 0;
		sg = st->sgl;
	}

	for (column = 0; column < width; column++) {
		src_idx = width * (height - 1) + column;
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
			sg_dma_address(sg) = in[offset + src_idx];
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= width;
		}
	}

	return sg;
}
static struct sg_table *
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
			  struct drm_i915_gem_object *obj)
{
	struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
	unsigned int size_pages_uv;
	struct sg_page_iter sg_iter;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	unsigned int uv_start_page;
	struct scatterlist *sg;
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
				       sizeof(dma_addr_t));
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Account for UV plane with NV12. */
	if (rot_info->pixel_format == DRM_FORMAT_NV12)
		size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
	else
		size_pages_uv = 0;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), M_DRM, M_WAITOK);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
		i++;
	}

	/* Rotate the pages. */
	sg = rotate_pages(page_addr_list, 0,
			  rot_info->width_pages, rot_info->height_pages,
			  st, NULL);

	/* Append the UV plane if NV12. */
	if (rot_info->pixel_format == DRM_FORMAT_NV12) {
		uv_start_page = size_pages;

		/* Check for tile-row un-alignment. */
		if (offset_in_page(rot_info->uv_offset))
			uv_start_page--;

		rot_info->uv_start_page = uv_start_page;

		rotate_pages(page_addr_list, uv_start_page,
			     rot_info->width_pages_uv,
			     rot_info->height_pages_uv,
			     st, sg);
	}

	DRM_DEBUG_KMS(
		      "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
		      obj->base.size, rot_info->pitch, rot_info->height,
		      rot_info->pixel_format, rot_info->width_pages,
		      rot_info->height_pages, size_pages + size_pages_uv,
		      size_pages);

	drm_free_large(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_free_large(page_addr_list);

	DRM_DEBUG_KMS(
		      "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
		      obj->base.size, ret, rot_info->pitch, rot_info->height,
		      rot_info->pixel_format, rot_info->width_pages,
		      rot_info->height_pages, size_pages + size_pages_uv,
		      size_pages);
	return ERR_PTR(ret);
}
static struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter obj_sg_iter;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), M_DRM, M_WAITOK);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	sg = st->sgl;
	st->nents = 0;
	for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
		view->params.partial.offset)
	{
		if (st->nents >= view->params.partial.size)
			break;

		sg_set_page(sg, NULL, PAGE_SIZE, 0);
		sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
		sg_dma_len(sg) = PAGE_SIZE;

		sg = sg_next(sg);
		st->nents++;
	}

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}
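
/*
 * Resolve the backing pages for a GGTT view: the normal view simply
 * reuses the object's own sg table, while rotated and partial views
 * build a fresh table describing their layout. The result is stashed in
 * vma->ggtt_view.pages and reused on later binds.
 */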
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret = 0;

	if (vma->ggtt_view.pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->ggtt_view.pages = vma->obj->pages;
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		vma->ggtt_view.pages =
			intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
		vma->ggtt_view.pages =
			intel_partial_pages(&vma->ggtt_view, vma->obj);
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->ggtt_view.pages) {
		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
			  vma->ggtt_view.type);
		ret = -EINVAL;
	} else if (IS_ERR(vma->ggtt_view.pages)) {
		ret = PTR_ERR(vma->ggtt_view.pages);
		vma->ggtt_view.pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}

	return ret;
}
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	int ret;
	u32 bind_flags;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= LOCAL_BIND;

	if (flags & PIN_UPDATE)
		bind_flags |= vma->bound;
	else
		bind_flags &= ~vma->bound;

	if (bind_flags == 0)
		return 0;

	if (vma->bound == 0 && vma->vm->allocate_va_range) {
		trace_i915_va_alloc(vma->vm,
				    vma->node.start,
				    vma->node.size,
				    VM_TO_TRACE_NAME(vma->vm));

		/* XXX: i915_vma_pin() will fix this +- hack */
		vma->pin_count++;
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		vma->pin_count--;
		if (ret)
			return ret;
	}

	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->bound |= bind_flags;

	return 0;
}
/**
 * i915_ggtt_view_size - Get the size of a GGTT view.
 * @obj: Object the view is of.
 * @view: The view in question.
 *
 * @return The size of the GGTT view in bytes.
 */
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
		    const struct i915_ggtt_view *view)
{
	if (view->type == I915_GGTT_VIEW_NORMAL) {
		return obj->base.size;
	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
		return view->params.rotation_info.size;
	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
		return view->params.partial.size << PAGE_SHIFT;
	} else {
		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
		return obj->base.size;
	}
}