drm/linux: u64 is unsigned long long
dragonfly.git: sys/dev/drm/i915/i915_gem_gtt.c
1 /*
2  * Copyright © 2010 Daniel Vetter
3  * Copyright © 2011-2014 Intel Corporation
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  */
25
26 #include <linux/seq_file.h>
27 #include <drm/drmP.h>
28 #include <drm/i915_drm.h>
29 #include "i915_drv.h"
30 #include "i915_vgpu.h"
31 #include "i915_trace.h"
32 #include "intel_drv.h"
33
34 #include <linux/bitmap.h>
35
36 #include <sys/mplock2.h>
37
38 /**
39  * DOC: Global GTT views
40  *
41  * Background and previous state
42  *
43  * Historically, objects could exist (be bound) in global GTT space only as
44  * singular instances with a view representing all of the object's backing pages
45  * in a linear fashion. This view will be called a normal view.
46  *
47  * To support multiple views of the same object, where the number of mapped
48  * pages is not equal to the backing store, or where the layout of the pages
49  * is not linear, the concept of a GGTT view was added.
50  *
51  * One example of an alternative view is a stereo display driven by a single
52  * image. In this case we would have a framebuffer looking like this
53  * (2x2 pages):
54  *
55  *    12
56  *    34
57  *
58  * Above would represent a normal GGTT view as normally mapped for GPU or CPU
59  * rendering. In contrast, fed to the display engine would be an alternative
60  * view which could look something like this:
61  *
62  *   1212
63  *   3434
64  *
65  * In this example both the size and layout of pages in the alternative view are
66  * different from the normal view.
67  *
68  * Implementation and usage
69  *
70  * GGTT views are implemented using VMAs and are distinguished via enum
71  * i915_ggtt_view_type and struct i915_ggtt_view.
72  *
73  * A new flavour of core GEM functions which work with GGTT bound objects was
74  * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
75  * renaming large amounts of code. They take the struct i915_ggtt_view
76  * parameter encapsulating all metadata required to implement a view.
77  *
78  * As a helper for callers which are only interested in the normal view,
79  * a globally const i915_ggtt_view_normal singleton instance exists. All old core
80  * GEM API functions, the ones not taking the view parameter, operate on,
81  * or with, the normal GGTT view.
82  *
83  * Code wanting to add or use a new GGTT view needs to:
84  *
85  * 1. Add a new enum with a suitable name.
86  * 2. Extend the metadata in the i915_ggtt_view structure if required.
87  * 3. Add support to i915_get_vma_pages().
88  *
89  * New views are required to build a scatter-gather table from within the
90  * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
91  * exists for the lifetime of a VMA.
92  *
93  * The core API is designed to have copy semantics, which means that a passed-in
94  * struct i915_ggtt_view does not need to be persistent (left around after
95  * calling the core API functions). An illustrative usage sketch follows this block.
96  *
97  */
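/*
 * Illustrative sketch (editorial addition, not driver code): because the core
 * view API has copy semantics as described above, a caller can build the view
 * descriptor on the stack. The pin helper named below is hypothetical and only
 * stands in for the real _ggtt_/_view flavoured GEM entry points.
 */
#if 0
static int example_pin_with_rotated_view(struct drm_i915_gem_object *obj)
{
	struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_ROTATED,
	};

	/* The callee copies 'view', so it may go out of scope after the call. */
	return example_gem_object_ggtt_pin_view(obj, &view, 0 /* flags */);
}
#endif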
98
99 static int
100 i915_get_ggtt_vma_pages(struct i915_vma *vma);
101
102 const struct i915_ggtt_view i915_ggtt_view_normal = {
103         .type = I915_GGTT_VIEW_NORMAL,
104 };
105 const struct i915_ggtt_view i915_ggtt_view_rotated = {
106         .type = I915_GGTT_VIEW_ROTATED,
107 };
108
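/*
 * Descriptive note (editorial addition): sanitize_enable_ppgtt() maps the
 * i915.enable_ppgtt module parameter onto what the hardware and driver
 * configuration can actually support. The return value selects the PPGTT
 * mode: 0 = disabled, 1 = aliasing PPGTT, 2 = full PPGTT, 3 = full 48bit
 * PPGTT.
 */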
109 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
110 {
111         bool has_aliasing_ppgtt;
112         bool has_full_ppgtt;
113         bool has_full_48bit_ppgtt;
114
115         has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
116         has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
117         has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
118
119         if (intel_vgpu_active(dev))
120                 has_full_ppgtt = false; /* emulation is too hard */
121
122         /*
123          * We don't allow disabling PPGTT for gen9+ as it's a requirement for
124          * execlists, the sole mechanism available to submit work.
125          */
126         if (INTEL_INFO(dev)->gen < 9 &&
127             (enable_ppgtt == 0 || !has_aliasing_ppgtt))
128                 return 0;
129
130         if (enable_ppgtt == 1)
131                 return 1;
132
133         if (enable_ppgtt == 2 && has_full_ppgtt)
134                 return 2;
135
136         if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
137                 return 3;
138
139 #ifdef CONFIG_INTEL_IOMMU
140         /* Disable ppgtt on SNB if VT-d is on. */
141         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
142                 DRM_INFO("Disabling PPGTT because VT-d is on\n");
143                 return 0;
144         }
145 #endif
146
147         /* Early VLV doesn't have this */
148         if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
149                 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
150                 return 0;
151         }
152
153         if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
154                 return has_full_48bit_ppgtt ? 3 : 2;
155         else
156                 return has_aliasing_ppgtt ? 1 : 0;
157 }
158
159 static int ppgtt_bind_vma(struct i915_vma *vma,
160                           enum i915_cache_level cache_level,
161                           u32 unused)
162 {
163         u32 pte_flags = 0;
164
165         /* Currently applicable only to VLV */
166         if (vma->obj->gt_ro)
167                 pte_flags |= PTE_READ_ONLY;
168
169         vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
170                                 cache_level, pte_flags);
171
172         return 0;
173 }
174
175 static void ppgtt_unbind_vma(struct i915_vma *vma)
176 {
177         vma->vm->clear_range(vma->vm,
178                              vma->node.start,
179                              vma->obj->base.size,
180                              true);
181 }
182
183 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
184                                   enum i915_cache_level level,
185                                   bool valid)
186 {
187         gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
188         pte |= addr;
189
190         switch (level) {
191         case I915_CACHE_NONE:
192                 pte |= PPAT_UNCACHED_INDEX;
193                 break;
194         case I915_CACHE_WT:
195                 pte |= PPAT_DISPLAY_ELLC_INDEX;
196                 break;
197         default:
198                 pte |= PPAT_CACHED_INDEX;
199                 break;
200         }
201
202         return pte;
203 }
204
205 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
206                                   const enum i915_cache_level level)
207 {
208         gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
209         pde |= addr;
210         if (level != I915_CACHE_NONE)
211                 pde |= PPAT_CACHED_PDE_INDEX;
212         else
213                 pde |= PPAT_UNCACHED_INDEX;
214         return pde;
215 }
216
217 #define gen8_pdpe_encode gen8_pde_encode
218 #define gen8_pml4e_encode gen8_pde_encode
219
220 static gen6_pte_t snb_pte_encode(dma_addr_t addr,
221                                  enum i915_cache_level level,
222                                  bool valid, u32 unused)
223 {
224         gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
225         pte |= GEN6_PTE_ADDR_ENCODE(addr);
226
227         switch (level) {
228         case I915_CACHE_L3_LLC:
229         case I915_CACHE_LLC:
230                 pte |= GEN6_PTE_CACHE_LLC;
231                 break;
232         case I915_CACHE_NONE:
233                 pte |= GEN6_PTE_UNCACHED;
234                 break;
235         default:
236                 MISSING_CASE(level);
237         }
238
239         return pte;
240 }
241
242 static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
243                                  enum i915_cache_level level,
244                                  bool valid, u32 unused)
245 {
246         gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
247         pte |= GEN6_PTE_ADDR_ENCODE(addr);
248
249         switch (level) {
250         case I915_CACHE_L3_LLC:
251                 pte |= GEN7_PTE_CACHE_L3_LLC;
252                 break;
253         case I915_CACHE_LLC:
254                 pte |= GEN6_PTE_CACHE_LLC;
255                 break;
256         case I915_CACHE_NONE:
257                 pte |= GEN6_PTE_UNCACHED;
258                 break;
259         default:
260                 MISSING_CASE(level);
261         }
262
263         return pte;
264 }
265
266 static gen6_pte_t byt_pte_encode(dma_addr_t addr,
267                                  enum i915_cache_level level,
268                                  bool valid, u32 flags)
269 {
270         gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
271         pte |= GEN6_PTE_ADDR_ENCODE(addr);
272
273         if (!(flags & PTE_READ_ONLY))
274                 pte |= BYT_PTE_WRITEABLE;
275
276         if (level != I915_CACHE_NONE)
277                 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
278
279         return pte;
280 }
281
282 static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
283                                  enum i915_cache_level level,
284                                  bool valid, u32 unused)
285 {
286         gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
287         pte |= HSW_PTE_ADDR_ENCODE(addr);
288
289         if (level != I915_CACHE_NONE)
290                 pte |= HSW_WB_LLC_AGE3;
291
292         return pte;
293 }
294
295 static gen6_pte_t iris_pte_encode(dma_addr_t addr,
296                                   enum i915_cache_level level,
297                                   bool valid, u32 unused)
298 {
299         gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
300         pte |= HSW_PTE_ADDR_ENCODE(addr);
301
302         switch (level) {
303         case I915_CACHE_NONE:
304                 break;
305         case I915_CACHE_WT:
306                 pte |= HSW_WT_ELLC_LLC_AGE3;
307                 break;
308         default:
309                 pte |= HSW_WB_ELLC_LLC_AGE3;
310                 break;
311         }
312
313         return pte;
314 }
315
316 static int __setup_page_dma(struct drm_device *dev,
317                             struct i915_page_dma *p, gfp_t flags)
318 {
319         struct device *device = &dev->pdev->dev;
320
321         p->page = alloc_page(flags);
322         if (!p->page)
323                 return -ENOMEM;
324
325         p->daddr = dma_map_page(device,
326                                 p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
327
328         if (dma_mapping_error(device, p->daddr)) {
329                 __free_page(p->page);
330                 return -EINVAL;
331         }
332
333         return 0;
334 }
335
336 static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
337 {
338         return __setup_page_dma(dev, p, GFP_KERNEL);
339 }
340
341 static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
342 {
343         if (WARN_ON(!p->page))
344                 return;
345
346         dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
347         __free_page(p->page);
348         memset(p, 0, sizeof(*p));
349 }
350
351 static void *kmap_page_dma(struct i915_page_dma *p)
352 {
353         return kmap_atomic(p->page);
354 }
355
356 /* We use the flushing unmap only with ppgtt structures:
357  * page directories, page tables and scratch pages.
358  */
359 static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
360 {
361         /* There are only a few exceptions for gen >= 6: chv and bxt.
362          * And we are not sure about the latter, so play safe for now.
363          */
364         if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
365                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
366
367         kunmap_atomic(vaddr);
368 }
369
370 #define kmap_px(px) kmap_page_dma(px_base(px))
371 #define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
372
373 #define setup_px(dev, px) setup_page_dma((dev), px_base(px))
374 #define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
375 #define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
376 #define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
377
378 static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
379                           const uint64_t val)
380 {
381         int i;
382         uint64_t * const vaddr = kmap_page_dma(p);
383
384         for (i = 0; i < 512; i++)
385                 vaddr[i] = val;
386
387         kunmap_page_dma(dev, vaddr);
388 }
389
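/*
 * Descriptive note (editorial addition): fill_page_dma_32() below replicates
 * the 32 bit value into both halves of a 64 bit word so the 64 bit
 * fill_page_dma() loop above can also fill pages holding 32 bit entries
 * (e.g. gen6 PTEs via fill32_px()), writing two entries per iteration.
 */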
390 static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
391                              const uint32_t val32)
392 {
393         uint64_t v = val32;
394
395         v = v << 32 | val32;
396
397         fill_page_dma(dev, p, v);
398 }
399
400 static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
401 {
402         struct i915_page_scratch *sp;
403         int ret;
404
405         sp = kzalloc(sizeof(*sp), GFP_KERNEL);
406         if (sp == NULL)
407                 return ERR_PTR(-ENOMEM);
408
409         ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
410         if (ret) {
411                 kfree(sp);
412                 return ERR_PTR(ret);
413         }
414
415         set_pages_uc(px_page(sp), 1);
416
417         return sp;
418 }
419
420 static void free_scratch_page(struct drm_device *dev,
421                               struct i915_page_scratch *sp)
422 {
423         set_pages_wb(px_page(sp), 1);
424
425         cleanup_px(dev, sp);
426         kfree(sp);
427 }
428
429 static struct i915_page_table *alloc_pt(struct drm_device *dev)
430 {
431         struct i915_page_table *pt;
432         const size_t count = INTEL_INFO(dev)->gen >= 8 ?
433                 GEN8_PTES : GEN6_PTES;
434         int ret = -ENOMEM;
435
436         pt = kzalloc(sizeof(*pt), GFP_KERNEL);
437         if (!pt)
438                 return ERR_PTR(-ENOMEM);
439
440         pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
441                                 GFP_KERNEL);
442
443         if (!pt->used_ptes)
444                 goto fail_bitmap;
445
446         ret = setup_px(dev, pt);
447         if (ret)
448                 goto fail_page_m;
449
450         return pt;
451
452 fail_page_m:
453         kfree(pt->used_ptes);
454 fail_bitmap:
455         kfree(pt);
456
457         return ERR_PTR(ret);
458 }
459
460 static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
461 {
462         cleanup_px(dev, pt);
463         kfree(pt->used_ptes);
464         kfree(pt);
465 }
466
467 static void gen8_initialize_pt(struct i915_address_space *vm,
468                                struct i915_page_table *pt)
469 {
470         gen8_pte_t scratch_pte;
471
472         scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
473                                       I915_CACHE_LLC, true);
474
475         fill_px(vm->dev, pt, scratch_pte);
476 }
477
478 static void gen6_initialize_pt(struct i915_address_space *vm,
479                                struct i915_page_table *pt)
480 {
481         gen6_pte_t scratch_pte;
482
483         WARN_ON(px_dma(vm->scratch_page) == 0);
484
485         scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
486                                      I915_CACHE_LLC, true, 0);
487
488         fill32_px(vm->dev, pt, scratch_pte);
489 }
490
491 static struct i915_page_directory *alloc_pd(struct drm_device *dev)
492 {
493         struct i915_page_directory *pd;
494         int ret = -ENOMEM;
495
496         pd = kzalloc(sizeof(*pd), GFP_KERNEL);
497         if (!pd)
498                 return ERR_PTR(-ENOMEM);
499
500         pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
501                                 sizeof(*pd->used_pdes), GFP_KERNEL);
502         if (!pd->used_pdes)
503                 goto fail_bitmap;
504
505         ret = setup_px(dev, pd);
506         if (ret)
507                 goto fail_page_m;
508
509         return pd;
510
511 fail_page_m:
512         kfree(pd->used_pdes);
513 fail_bitmap:
514         kfree(pd);
515
516         return ERR_PTR(ret);
517 }
518
519 static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
520 {
521         if (px_page(pd)) {
522                 cleanup_px(dev, pd);
523                 kfree(pd->used_pdes);
524                 kfree(pd);
525         }
526 }
527
528 static void gen8_initialize_pd(struct i915_address_space *vm,
529                                struct i915_page_directory *pd)
530 {
531         gen8_pde_t scratch_pde;
532
533         scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
534
535         fill_px(vm->dev, pd, scratch_pde);
536 }
537
538 static int __pdp_init(struct drm_device *dev,
539                       struct i915_page_directory_pointer *pdp)
540 {
541         size_t pdpes = I915_PDPES_PER_PDP(dev);
542
543         pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
544                                   sizeof(unsigned long),
545                                   GFP_KERNEL);
546         if (!pdp->used_pdpes)
547                 return -ENOMEM;
548
549         pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
550                                       GFP_KERNEL);
551         if (!pdp->page_directory) {
552                 kfree(pdp->used_pdpes);
553                 /* the PDP might be the statically allocated top level. Keep it
554                  * as clean as possible */
555                 pdp->used_pdpes = NULL;
556                 return -ENOMEM;
557         }
558
559         return 0;
560 }
561
562 static void __pdp_fini(struct i915_page_directory_pointer *pdp)
563 {
564         kfree(pdp->used_pdpes);
565         kfree(pdp->page_directory);
566         pdp->page_directory = NULL;
567 }
568
569 static struct
570 i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
571 {
572         struct i915_page_directory_pointer *pdp;
573         int ret = -ENOMEM;
574
575         WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
576
577         pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
578         if (!pdp)
579                 return ERR_PTR(-ENOMEM);
580
581         ret = __pdp_init(dev, pdp);
582         if (ret)
583                 goto fail_bitmap;
584
585         ret = setup_px(dev, pdp);
586         if (ret)
587                 goto fail_page_m;
588
589         return pdp;
590
591 fail_page_m:
592         __pdp_fini(pdp);
593 fail_bitmap:
594         kfree(pdp);
595
596         return ERR_PTR(ret);
597 }
598
599 static void free_pdp(struct drm_device *dev,
600                      struct i915_page_directory_pointer *pdp)
601 {
602         __pdp_fini(pdp);
603         if (USES_FULL_48BIT_PPGTT(dev)) {
604                 cleanup_px(dev, pdp);
605                 kfree(pdp);
606         }
607 }
608
609 static void gen8_initialize_pdp(struct i915_address_space *vm,
610                                 struct i915_page_directory_pointer *pdp)
611 {
612         gen8_ppgtt_pdpe_t scratch_pdpe;
613
614         scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
615
616         fill_px(vm->dev, pdp, scratch_pdpe);
617 }
618
619 static void gen8_initialize_pml4(struct i915_address_space *vm,
620                                  struct i915_pml4 *pml4)
621 {
622         gen8_ppgtt_pml4e_t scratch_pml4e;
623
624         scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
625                                           I915_CACHE_LLC);
626
627         fill_px(vm->dev, pml4, scratch_pml4e);
628 }
629
630 static void
631 gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
632                           struct i915_page_directory_pointer *pdp,
633                           struct i915_page_directory *pd,
634                           int index)
635 {
636         gen8_ppgtt_pdpe_t *page_directorypo;
637
638         if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
639                 return;
640
641         page_directorypo = kmap_px(pdp);
642         page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
643         kunmap_px(ppgtt, page_directorypo);
644 }
645
646 static void
647 gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
648                                   struct i915_pml4 *pml4,
649                                   struct i915_page_directory_pointer *pdp,
650                                   int index)
651 {
652         gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
653
654         WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
655         pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
656         kunmap_px(ppgtt, pagemap);
657 }
658
659 /* Broadwell Page Directory Pointer Descriptors */
660 static int gen8_write_pdp(struct drm_i915_gem_request *req,
661                           unsigned entry,
662                           dma_addr_t addr)
663 {
664         struct intel_engine_cs *ring = req->ring;
665         int ret;
666
667         BUG_ON(entry >= 4);
668
669         ret = intel_ring_begin(req, 6);
670         if (ret)
671                 return ret;
672
673         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
674         intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
675         intel_ring_emit(ring, upper_32_bits(addr));
676         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
677         intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
678         intel_ring_emit(ring, lower_32_bits(addr));
679         intel_ring_advance(ring);
680
681         return 0;
682 }
683
684 static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
685                                  struct drm_i915_gem_request *req)
686 {
687         int i, ret;
688
689         for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
690                 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
691
692                 ret = gen8_write_pdp(req, i, pd_daddr);
693                 if (ret)
694                         return ret;
695         }
696
697         return 0;
698 }
699
700 static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
701                               struct drm_i915_gem_request *req)
702 {
703         return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
704 }
705
706 static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
707                                        struct i915_page_directory_pointer *pdp,
708                                        uint64_t start,
709                                        uint64_t length,
710                                        gen8_pte_t scratch_pte)
711 {
712         struct i915_hw_ppgtt *ppgtt =
713                 container_of(vm, struct i915_hw_ppgtt, base);
714         gen8_pte_t *pt_vaddr;
715         unsigned pdpe = gen8_pdpe_index(start);
716         unsigned pde = gen8_pde_index(start);
717         unsigned pte = gen8_pte_index(start);
718         unsigned num_entries = length >> PAGE_SHIFT;
719         unsigned last_pte, i;
720
721         if (WARN_ON(!pdp))
722                 return;
723
724         while (num_entries) {
725                 struct i915_page_directory *pd;
726                 struct i915_page_table *pt;
727
728                 if (WARN_ON(!pdp->page_directory[pdpe]))
729                         break;
730
731                 pd = pdp->page_directory[pdpe];
732
733                 if (WARN_ON(!pd->page_table[pde]))
734                         break;
735
736                 pt = pd->page_table[pde];
737
738                 if (WARN_ON(!px_page(pt)))
739                         break;
740
741                 last_pte = pte + num_entries;
742                 if (last_pte > GEN8_PTES)
743                         last_pte = GEN8_PTES;
744
745                 pt_vaddr = kmap_px(pt);
746
747                 for (i = pte; i < last_pte; i++) {
748                         pt_vaddr[i] = scratch_pte;
749                         num_entries--;
750                 }
751
752                 kunmap_px(ppgtt, pt);
753
754                 pte = 0;
755                 if (++pde == I915_PDES) {
756                         if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
757                                 break;
758                         pde = 0;
759                 }
760         }
761 }
762
763 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
764                                    uint64_t start,
765                                    uint64_t length,
766                                    bool use_scratch)
767 {
768         struct i915_hw_ppgtt *ppgtt =
769                 container_of(vm, struct i915_hw_ppgtt, base);
770         gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
771                                                  I915_CACHE_LLC, use_scratch);
772
773         if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
774                 gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
775                                            scratch_pte);
776         } else {
777                 uint64_t pml4e;
778                 struct i915_page_directory_pointer *pdp;
779
780                 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
781                         gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
782                                                    scratch_pte);
783                 }
784         }
785 }
786
787 static void
788 gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
789                               struct i915_page_directory_pointer *pdp,
790                               struct sg_page_iter *sg_iter,
791                               uint64_t start,
792                               enum i915_cache_level cache_level)
793 {
794         struct i915_hw_ppgtt *ppgtt =
795                 container_of(vm, struct i915_hw_ppgtt, base);
796         gen8_pte_t *pt_vaddr;
797         unsigned pdpe = gen8_pdpe_index(start);
798         unsigned pde = gen8_pde_index(start);
799         unsigned pte = gen8_pte_index(start);
800
801         pt_vaddr = NULL;
802
803         while (__sg_page_iter_next(sg_iter)) {
804                 if (pt_vaddr == NULL) {
805                         struct i915_page_directory *pd = pdp->page_directory[pdpe];
806                         struct i915_page_table *pt = pd->page_table[pde];
807                         pt_vaddr = kmap_px(pt);
808                 }
809
810                 pt_vaddr[pte] =
811                         gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
812                                         cache_level, true);
813                 if (++pte == GEN8_PTES) {
814                         kunmap_px(ppgtt, pt_vaddr);
815                         pt_vaddr = NULL;
816                         if (++pde == I915_PDES) {
817                                 if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
818                                         break;
819                                 pde = 0;
820                         }
821                         pte = 0;
822                 }
823         }
824
825         if (pt_vaddr)
826                 kunmap_px(ppgtt, pt_vaddr);
827 }
828
829 static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
830                                       struct sg_table *pages,
831                                       uint64_t start,
832                                       enum i915_cache_level cache_level,
833                                       u32 unused)
834 {
835         struct i915_hw_ppgtt *ppgtt =
836                 container_of(vm, struct i915_hw_ppgtt, base);
837         struct sg_page_iter sg_iter;
838
839         __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
840
841         if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
842                 gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
843                                               cache_level);
844         } else {
845                 struct i915_page_directory_pointer *pdp;
846                 uint64_t pml4e;
847                 uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
848
849                 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
850                         gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
851                                                       start, cache_level);
852                 }
853         }
854 }
855
856 static void gen8_free_page_tables(struct drm_device *dev,
857                                   struct i915_page_directory *pd)
858 {
859         int i;
860
861         if (!px_page(pd))
862                 return;
863
864         for_each_set_bit(i, pd->used_pdes, I915_PDES) {
865                 if (WARN_ON(!pd->page_table[i]))
866                         continue;
867
868                 free_pt(dev, pd->page_table[i]);
869                 pd->page_table[i] = NULL;
870         }
871 }
872
873 static int gen8_init_scratch(struct i915_address_space *vm)
874 {
875         struct drm_device *dev = vm->dev;
876
877         vm->scratch_page = alloc_scratch_page(dev);
878         if (IS_ERR(vm->scratch_page))
879                 return PTR_ERR(vm->scratch_page);
880
881         vm->scratch_pt = alloc_pt(dev);
882         if (IS_ERR(vm->scratch_pt)) {
883                 free_scratch_page(dev, vm->scratch_page);
884                 return PTR_ERR(vm->scratch_pt);
885         }
886
887         vm->scratch_pd = alloc_pd(dev);
888         if (IS_ERR(vm->scratch_pd)) {
889                 free_pt(dev, vm->scratch_pt);
890                 free_scratch_page(dev, vm->scratch_page);
891                 return PTR_ERR(vm->scratch_pd);
892         }
893
894         if (USES_FULL_48BIT_PPGTT(dev)) {
895                 vm->scratch_pdp = alloc_pdp(dev);
896                 if (IS_ERR(vm->scratch_pdp)) {
897                         free_pd(dev, vm->scratch_pd);
898                         free_pt(dev, vm->scratch_pt);
899                         free_scratch_page(dev, vm->scratch_page);
900                         return PTR_ERR(vm->scratch_pdp);
901                 }
902         }
903
904         gen8_initialize_pt(vm, vm->scratch_pt);
905         gen8_initialize_pd(vm, vm->scratch_pd);
906         if (USES_FULL_48BIT_PPGTT(dev))
907                 gen8_initialize_pdp(vm, vm->scratch_pdp);
908
909         return 0;
910 }
911
912 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
913 {
914         enum vgt_g2v_type msg;
915         struct drm_device *dev = ppgtt->base.dev;
916         struct drm_i915_private *dev_priv = dev->dev_private;
917         int i;
918
919         if (USES_FULL_48BIT_PPGTT(dev)) {
920                 u64 daddr = px_dma(&ppgtt->pml4);
921
922                 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
923                 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
924
925                 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
926                                 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
927         } else {
928                 for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
929                         u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
930
931                         I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
932                         I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
933                 }
934
935                 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
936                                 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
937         }
938
939         I915_WRITE(vgtif_reg(g2v_notify), msg);
940
941         return 0;
942 }
943
944 static void gen8_free_scratch(struct i915_address_space *vm)
945 {
946         struct drm_device *dev = vm->dev;
947
948         if (USES_FULL_48BIT_PPGTT(dev))
949                 free_pdp(dev, vm->scratch_pdp);
950         free_pd(dev, vm->scratch_pd);
951         free_pt(dev, vm->scratch_pt);
952         free_scratch_page(dev, vm->scratch_page);
953 }
954
955 static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
956                                     struct i915_page_directory_pointer *pdp)
957 {
958         int i;
959
960         for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
961                 if (WARN_ON(!pdp->page_directory[i]))
962                         continue;
963
964                 gen8_free_page_tables(dev, pdp->page_directory[i]);
965                 free_pd(dev, pdp->page_directory[i]);
966         }
967
968         free_pdp(dev, pdp);
969 }
970
971 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
972 {
973         int i;
974
975         for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
976                 if (WARN_ON(!ppgtt->pml4.pdps[i]))
977                         continue;
978
979                 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
980         }
981
982         cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
983 }
984
985 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
986 {
987         struct i915_hw_ppgtt *ppgtt =
988                 container_of(vm, struct i915_hw_ppgtt, base);
989
990         if (intel_vgpu_active(vm->dev))
991                 gen8_ppgtt_notify_vgt(ppgtt, false);
992
993         if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
994                 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
995         else
996                 gen8_ppgtt_cleanup_4lvl(ppgtt);
997
998         gen8_free_scratch(vm);
999 }
1000
1001 /**
1002  * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
1003  * @vm: Master vm structure.
1004  * @pd: Page directory for this address range.
1005  * @start:      Starting virtual address to begin allocations.
1006  * @length:     Size of the allocations.
1007  * @new_pts:    Bitmap set by function with new allocations. Likely used by the
1008  *              caller to free on error.
1009  *
1010  * Allocate the required number of page tables. Extremely similar to
1011  * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
1012  * the page directory boundary (instead of the page directory pointer). That
1013  * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
1014  * possible, and likely, that the caller will need to make multiple calls to this
1015  * function to achieve the appropriate allocation.
1016  *
1017  * Return: 0 if success; negative error code otherwise.
1018  */
1019 static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
1020                                      struct i915_page_directory *pd,
1021                                      uint64_t start,
1022                                      uint64_t length,
1023                                      unsigned long *new_pts)
1024 {
1025         struct drm_device *dev = vm->dev;
1026         struct i915_page_table *pt;
1027         uint32_t pde;
1028
1029         gen8_for_each_pde(pt, pd, start, length, pde) {
1030                 /* Don't reallocate page tables */
1031                 if (test_bit(pde, pd->used_pdes)) {
1032                         /* Scratch is never allocated this way */
1033                         WARN_ON(pt == vm->scratch_pt);
1034                         continue;
1035                 }
1036
1037                 pt = alloc_pt(dev);
1038                 if (IS_ERR(pt))
1039                         goto unwind_out;
1040
1041                 gen8_initialize_pt(vm, pt);
1042                 pd->page_table[pde] = pt;
1043                 __set_bit(pde, new_pts);
1044                 trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
1045         }
1046
1047         return 0;
1048
1049 unwind_out:
1050         for_each_set_bit(pde, new_pts, I915_PDES)
1051                 free_pt(dev, pd->page_table[pde]);
1052
1053         return -ENOMEM;
1054 }
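/*
 * Illustrative sketch (editorial addition, not driver code): how a gen8
 * address breaks down into the table indices used above. Each level indexes
 * a 512 entry (9 bit) table above a 4KiB (12 bit) page, so one page
 * directory spans 512 * 512 * 4096 bytes = 1GB of virtual address space,
 * the boundary referred to in the kerneldoc for gen8_ppgtt_alloc_pagetabs().
 */
#if 0
static void example_decode_gen8_va(uint64_t va)
{
	unsigned pml4e = (va >> GEN8_PML4E_SHIFT) & 0x1ff; /* 48bit ppgtt only */
	unsigned pdpe  = gen8_pdpe_index(va);
	unsigned pde   = gen8_pde_index(va);
	unsigned pte   = gen8_pte_index(va);

	DRM_DEBUG_DRIVER("va 0x%lx -> pml4e %u pdpe %u pde %u pte %u\n",
			 (unsigned long)va, pml4e, pdpe, pde, pte);
}
#endif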
1055
1056 /**
1057  * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
1058  * @vm: Master vm structure.
1059  * @pdp:        Page directory pointer for this address range.
1060  * @start:      Starting virtual address to begin allocations.
1061  * @length:     Size of the allocations.
1062  * @new_pds:    Bitmap set by function with new allocations. Likely used by the
1063  *              caller to free on error.
1064  *
1065  * Allocate the required number of page directories starting at the pdpe index of
1066  * @start, and ending at the pdpe index of @start + @length. This function will skip
1067  * over already allocated page directories within the range, and only allocate
1068  * new ones, setting the appropriate pointer within the pdp as well as the
1069  * correct position in the bitmap @new_pds.
1070  *
1071  * The function will only allocate the pages within the range for a given page
1072  * directory pointer. In other words, if @start + @length straddles a virtually
1073  * addressed PDP boundary (512GB for 4k pages), there will be more allocations
1074  * required by the caller. This is not currently possible, and the BUG in the
1075  * code will prevent it.
1076  *
1077  * Return: 0 if success; negative error code otherwise.
1078  */
1079 static int
1080 gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
1081                                   struct i915_page_directory_pointer *pdp,
1082                                   uint64_t start,
1083                                   uint64_t length,
1084                                   unsigned long *new_pds)
1085 {
1086         struct drm_device *dev = vm->dev;
1087         struct i915_page_directory *pd;
1088         uint32_t pdpe;
1089         uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1090
1091         WARN_ON(!bitmap_empty(new_pds, pdpes));
1092
1093         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1094                 if (test_bit(pdpe, pdp->used_pdpes))
1095                         continue;
1096
1097                 pd = alloc_pd(dev);
1098                 if (IS_ERR(pd))
1099                         goto unwind_out;
1100
1101                 gen8_initialize_pd(vm, pd);
1102                 pdp->page_directory[pdpe] = pd;
1103                 __set_bit(pdpe, new_pds);
1104                 trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
1105         }
1106
1107         return 0;
1108
1109 unwind_out:
1110         for_each_set_bit(pdpe, new_pds, pdpes)
1111                 free_pd(dev, pdp->page_directory[pdpe]);
1112
1113         return -ENOMEM;
1114 }
1115
1116 /**
1117  * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
1118  * @vm: Master vm structure.
1119  * @pml4:       Page map level 4 for this address range.
1120  * @start:      Starting virtual address to begin allocations.
1121  * @length:     Size of the allocations.
1122  * @new_pdps:   Bitmap set by function with new allocations. Likely used by the
1123  *              caller to free on error.
1124  *
1125  * Allocate the required number of page directory pointers. Extremely similar to
1126  * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
1127  * The main difference is here we are limited by the pml4 boundary (instead of
1128  * the page directory pointer).
1129  *
1130  * Return: 0 if success; negative error code otherwise.
1131  */
1132 static int
1133 gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
1134                                   struct i915_pml4 *pml4,
1135                                   uint64_t start,
1136                                   uint64_t length,
1137                                   unsigned long *new_pdps)
1138 {
1139         struct drm_device *dev = vm->dev;
1140         struct i915_page_directory_pointer *pdp;
1141         uint32_t pml4e;
1142
1143         WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
1144
1145         gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1146                 if (!test_bit(pml4e, pml4->used_pml4es)) {
1147                         pdp = alloc_pdp(dev);
1148                         if (IS_ERR(pdp))
1149                                 goto unwind_out;
1150
1151                         gen8_initialize_pdp(vm, pdp);
1152                         pml4->pdps[pml4e] = pdp;
1153                         __set_bit(pml4e, new_pdps);
1154                         trace_i915_page_directory_pointer_entry_alloc(vm,
1155                                                                       pml4e,
1156                                                                       start,
1157                                                                       GEN8_PML4E_SHIFT);
1158                 }
1159         }
1160
1161         return 0;
1162
1163 unwind_out:
1164         for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1165                 free_pdp(dev, pml4->pdps[pml4e]);
1166
1167         return -ENOMEM;
1168 }
1169
1170 static void
1171 free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
1172 {
1173         kfree(new_pts);
1174         kfree(new_pds);
1175 }
1176
1177 /* Fills in the page directory bitmap, and the array of page table bitmaps. Both
1178  * of these are sized based on the number of PDPEs in the system.
1179  */
1180 static
1181 int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
1182                                          unsigned long **new_pts,
1183                                          uint32_t pdpes)
1184 {
1185         unsigned long *pds;
1186         unsigned long *pts;
1187
1188         pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
1189         if (!pds)
1190                 return -ENOMEM;
1191
1192         pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
1193                       GFP_TEMPORARY);
1194         if (!pts)
1195                 goto err_out;
1196
1197         *new_pds = pds;
1198         *new_pts = pts;
1199
1200         return 0;
1201
1202 err_out:
1203         free_gen8_temp_bitmaps(pds, pts);
1204         return -ENOMEM;
1205 }
1206
1207 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
1208  * the page table structures, we mark them dirty so that
1209  * context switching/execlist queuing code takes extra steps
1210  * to ensure that tlbs are flushed.
1211  */
1212 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
1213 {
1214         ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
1215 }
1216
1217 static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
1218                                     struct i915_page_directory_pointer *pdp,
1219                                     uint64_t start,
1220                                     uint64_t length)
1221 {
1222         struct i915_hw_ppgtt *ppgtt =
1223                 container_of(vm, struct i915_hw_ppgtt, base);
1224         unsigned long *new_page_dirs, *new_page_tables;
1225         struct drm_device *dev = vm->dev;
1226         struct i915_page_directory *pd;
1227         const uint64_t orig_start = start;
1228         const uint64_t orig_length = length;
1229         uint32_t pdpe;
1230         uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1231         int ret;
1232
1233         /* Wrap is never okay since we can only represent 48b, and we don't
1234          * actually use the other side of the canonical address space.
1235          */
1236         if (WARN_ON(start + length < start))
1237                 return -ENODEV;
1238
1239         if (WARN_ON(start + length > vm->total))
1240                 return -ENODEV;
1241
1242         ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1243         if (ret)
1244                 return ret;
1245
1246         /* Do the allocations first so we can easily bail out */
1247         ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
1248                                                 new_page_dirs);
1249         if (ret) {
1250                 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1251                 return ret;
1252         }
1253
1254         /* For every page directory referenced, allocate page tables */
1255         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1256                 ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
1257                                                 new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
1258                 if (ret)
1259                         goto err_out;
1260         }
1261
1262         start = orig_start;
1263         length = orig_length;
1264
1265         /* Allocations have completed successfully, so set the bitmaps, and do
1266          * the mappings. */
1267         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1268                 gen8_pde_t *const page_directory = kmap_px(pd);
1269                 struct i915_page_table *pt;
1270                 uint64_t pd_len = length;
1271                 uint64_t pd_start = start;
1272                 uint32_t pde;
1273
1274                 /* Every pd should be allocated; we just did that above. */
1275                 WARN_ON(!pd);
1276
1277                 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1278                         /* Same reasoning as pd */
1279                         WARN_ON(!pt);
1280                         WARN_ON(!pd_len);
1281                         WARN_ON(!gen8_pte_count(pd_start, pd_len));
1282
1283                         /* Set our used ptes within the page table */
1284                         bitmap_set(pt->used_ptes,
1285                                    gen8_pte_index(pd_start),
1286                                    gen8_pte_count(pd_start, pd_len));
1287
1288                         /* Our pde is now pointing to the pagetable, pt */
1289                         __set_bit(pde, pd->used_pdes);
1290
1291                         /* Map the PDE to the page table */
1292                         page_directory[pde] = gen8_pde_encode(px_dma(pt),
1293                                                               I915_CACHE_LLC);
1294                         trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
1295                                                         gen8_pte_index(start),
1296                                                         gen8_pte_count(start, length),
1297                                                         GEN8_PTES);
1298
1299                         /* NB: We haven't yet mapped ptes to pages. At this
1300                          * point we're still relying on insert_entries() */
1301                 }
1302
1303                 kunmap_px(ppgtt, page_directory);
1304                 __set_bit(pdpe, pdp->used_pdpes);
1305                 gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
1306         }
1307
1308         free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1309         mark_tlbs_dirty(ppgtt);
1310         return 0;
1311
1312 err_out:
1313         while (pdpe--) {
1314                 unsigned long temp;
1315
1316                 for_each_set_bit(temp, new_page_tables + pdpe *
1317                                 BITS_TO_LONGS(I915_PDES), I915_PDES)
1318                         free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
1319         }
1320
1321         for_each_set_bit(pdpe, new_page_dirs, pdpes)
1322                 free_pd(dev, pdp->page_directory[pdpe]);
1323
1324         free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1325         mark_tlbs_dirty(ppgtt);
1326         return ret;
1327 }
1328
1329 static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
1330                                     struct i915_pml4 *pml4,
1331                                     uint64_t start,
1332                                     uint64_t length)
1333 {
1334         DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
1335         struct i915_hw_ppgtt *ppgtt =
1336                         container_of(vm, struct i915_hw_ppgtt, base);
1337         struct i915_page_directory_pointer *pdp;
1338         uint64_t pml4e;
1339         int ret = 0;
1340
1341         /* Do the pml4 allocations first, so we don't need to track the newly
1342          * allocated tables below the pdp */
1343         bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
1344
1345         /* The pagedirectory and pagetable allocations are done in the shared 3
1346          * and 4 level code. Just allocate the pdps.
1347          */
1348         ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
1349                                                 new_pdps);
1350         if (ret)
1351                 return ret;
1352
1353         WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
1354              "The allocation has spanned more than 512GB. "
1355              "It is highly likely this is incorrect.");
1356
1357         gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1358                 WARN_ON(!pdp);
1359
1360                 ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
1361                 if (ret)
1362                         goto err_out;
1363
1364                 gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
1365         }
1366
1367         bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
1368                   GEN8_PML4ES_PER_PML4);
1369
1370         return 0;
1371
1372 err_out:
1373         for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1374                 gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
1375
1376         return ret;
1377 }
1378
1379 static int gen8_alloc_va_range(struct i915_address_space *vm,
1380                                uint64_t start, uint64_t length)
1381 {
1382         struct i915_hw_ppgtt *ppgtt =
1383                 container_of(vm, struct i915_hw_ppgtt, base);
1384
1385         if (USES_FULL_48BIT_PPGTT(vm->dev))
1386                 return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
1387         else
1388                 return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
1389 }
1390
1391 static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
1392                           uint64_t start, uint64_t length,
1393                           gen8_pte_t scratch_pte,
1394                           struct seq_file *m)
1395 {
1396         struct i915_page_directory *pd;
1397         uint32_t pdpe;
1398
1399         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1400                 struct i915_page_table *pt;
1401                 uint64_t pd_len = length;
1402                 uint64_t pd_start = start;
1403                 uint32_t pde;
1404
1405                 if (!test_bit(pdpe, pdp->used_pdpes))
1406                         continue;
1407
1408                 seq_printf(m, "\tPDPE #%d\n", pdpe);
1409                 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1410                         uint32_t  pte;
1411                         gen8_pte_t *pt_vaddr;
1412
1413                         if (!test_bit(pde, pd->used_pdes))
1414                                 continue;
1415
1416                         pt_vaddr = kmap_px(pt);
1417                         for (pte = 0; pte < GEN8_PTES; pte += 4) {
1418                                 uint64_t va =
1419                                         (pdpe << GEN8_PDPE_SHIFT) |
1420                                         (pde << GEN8_PDE_SHIFT) |
1421                                         (pte << GEN8_PTE_SHIFT);
1422                                 int i;
1423                                 bool found = false;
1424
1425                                 for (i = 0; i < 4; i++)
1426                                         if (pt_vaddr[pte + i] != scratch_pte)
1427                                                 found = true;
1428                                 if (!found)
1429                                         continue;
1430
1431                                 seq_printf(m, "\t\t0x%lx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1432                                 for (i = 0; i < 4; i++) {
1433                                         if (pt_vaddr[pte + i] != scratch_pte)
1434                                         seq_printf(m, " %llx", pt_vaddr[pte + i]);
1435                                         else
1436                                                 seq_puts(m, "  SCRATCH ");
1437                                 }
1438                                 seq_puts(m, "\n");
1439                         }
1440                         /* don't use kunmap_px, it could trigger
1441                          * an unnecessary flush.
1442                          */
1443                         kunmap_atomic(pt_vaddr);
1444                 }
1445         }
1446 }
1447
1448 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1449 {
1450         struct i915_address_space *vm = &ppgtt->base;
1451         uint64_t start = ppgtt->base.start;
1452         uint64_t length = ppgtt->base.total;
1453         gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
1454                                                  I915_CACHE_LLC, true);
1455
1456         if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
1457                 gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
1458         } else {
1459                 uint64_t pml4e;
1460                 struct i915_pml4 *pml4 = &ppgtt->pml4;
1461                 struct i915_page_directory_pointer *pdp;
1462
1463                 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1464                         if (!test_bit(pml4e, pml4->used_pml4es))
1465                                 continue;
1466
1467                         seq_printf(m, "    PML4E #%lu\n", pml4e);
1468                         gen8_dump_pdp(pdp, start, length, scratch_pte, m);
1469                 }
1470         }
1471 }
1472
1473 static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
1474 {
1475         unsigned long *new_page_dirs, *new_page_tables;
1476         uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1477         int ret;
1478
1479         /* We allocate a temp bitmap for page tables for no gain
1480          * but as this is for init only, let's keep things simple
1481          */
1482         ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1483         if (ret)
1484                 return ret;
1485
1486         /* Allocate for all pdps regardless of how the ppgtt
1487          * was defined.
1488          */
1489         ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
1490                                                 0, 1ULL << 32,
1491                                                 new_page_dirs);
1492         if (!ret)
1493                 *ppgtt->pdp.used_pdpes = *new_page_dirs;
1494
1495         free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1496
1497         return ret;
1498 }
1499
1500 /*
1501  * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP registers,
1502  * with a net effect resembling a 2-level page table in normal x86 terms. Each
1503  * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of legacy 32b
1504  * address space.
1505  *
1506  */
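/*
 * Worked arithmetic for the comment above (editorial addition): both layouts
 * use 512 entry tables over 4KiB pages, so
 *
 *   legacy 3 level:  4 PDPs * 512 PDEs * 512 PTEs * 4096 = 1ULL << 32 (4GB)
 *   full 48bit:      512 PML4Es * 512 * 512 * 512 * 4096 = 1ULL << 48
 *
 * matching ppgtt->base.total as set for the two cases in gen8_ppgtt_init()
 * below.
 */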
1507 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1508 {
1509         int ret;
1510
1511         ret = gen8_init_scratch(&ppgtt->base);
1512         if (ret)
1513                 return ret;
1514
1515         ppgtt->base.start = 0;
1516         ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1517         ppgtt->base.allocate_va_range = gen8_alloc_va_range;
1518         ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
1519         ppgtt->base.clear_range = gen8_ppgtt_clear_range;
1520         ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1521         ppgtt->base.bind_vma = ppgtt_bind_vma;
1522         ppgtt->debug_dump = gen8_dump_ppgtt;
1523
1524         if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
1525                 ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
1526                 if (ret)
1527                         goto free_scratch;
1528
1529                 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1530
1531                 ppgtt->base.total = 1ULL << 48;
1532                 ppgtt->switch_mm = gen8_48b_mm_switch;
1533         } else {
1534                 ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
1535                 if (ret)
1536                         goto free_scratch;
1537
1538                 ppgtt->base.total = 1ULL << 32;
1539                 ppgtt->switch_mm = gen8_legacy_mm_switch;
1540                 trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
1541                                                               0, 0,
1542                                                               GEN8_PML4E_SHIFT);
1543
1544                 if (intel_vgpu_active(ppgtt->base.dev)) {
1545                         ret = gen8_preallocate_top_level_pdps(ppgtt);
1546                         if (ret)
1547                                 goto free_scratch;
1548                 }
1549         }
1550
1551         if (intel_vgpu_active(ppgtt->base.dev))
1552                 gen8_ppgtt_notify_vgt(ppgtt, true);
1553
1554         return 0;
1555
1556 free_scratch:
1557         gen8_free_scratch(&ppgtt->base);
1558         return ret;
1559 }
1560
1561 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1562 {
1563         struct i915_address_space *vm = &ppgtt->base;
1564         struct i915_page_table *unused;
1565         gen6_pte_t scratch_pte;
1566         uint32_t pd_entry;
1567         uint32_t  pte, pde, temp;
1568         uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
1569
1570         scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1571                                      I915_CACHE_LLC, true, 0);
1572
1573         gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
1574                 u32 expected;
1575                 gen6_pte_t *pt_vaddr;
1576                 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
1577                 pd_entry = readl(ppgtt->pd_addr + pde);
1578                 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1579
1580                 if (pd_entry != expected)
1581                         seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1582                                    pde,
1583                                    pd_entry,
1584                                    expected);
1585                 seq_printf(m, "\tPDE: %x\n", pd_entry);
1586
1587                 pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
1588
1589                 for (pte = 0; pte < GEN6_PTES; pte+=4) {
1590                         unsigned long va =
1591                                 (pde * PAGE_SIZE * GEN6_PTES) +
1592                                 (pte * PAGE_SIZE);
1593                         int i;
1594                         bool found = false;
1595                         for (i = 0; i < 4; i++)
1596                                 if (pt_vaddr[pte + i] != scratch_pte)
1597                                         found = true;
1598                         if (!found)
1599                                 continue;
1600
1601                         seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1602                         for (i = 0; i < 4; i++) {
1603                                 if (pt_vaddr[pte + i] != scratch_pte)
1604                                         seq_printf(m, " %08x", pt_vaddr[pte + i]);
1605                                 else
1606                                         seq_puts(m, "  SCRATCH ");
1607                         }
1608                         seq_puts(m, "\n");
1609                 }
1610                 kunmap_px(ppgtt, pt_vaddr);
1611         }
1612 }
1613
1614 /* Write the PDE at index @pde in page directory @pd to point at page table @pt */
1615 static void gen6_write_pde(struct i915_page_directory *pd,
1616                             const int pde, struct i915_page_table *pt)
1617 {
1618         /* Caller needs to make sure the write completes if necessary */
1619         struct i915_hw_ppgtt *ppgtt =
1620                 container_of(pd, struct i915_hw_ppgtt, pd);
1621         u32 pd_entry;
1622
1623         pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
1624         pd_entry |= GEN6_PDE_VALID;
1625
1626         writel(pd_entry, ppgtt->pd_addr + pde);
1627 }
1628
1629 /* Write PDEs for all the page tables found in @pd over the range
1630  * [start, start + length) into their corresponding page directory entries. */
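/*
 * A rough sketch of the arithmetic, assuming 4K pages: each gen6 page table
 * is a single page of gen6_pte_t entries, so GEN6_PTES is presumably
 * PAGE_SIZE / sizeof(gen6_pte_t) = 1024, and one PDE covers
 * 1024 * 4096 = 4MB of PPGTT address space.  Writing [start, start + length)
 * therefore touches on the order of length / 4MB page directory entries.
 */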
1631 static void gen6_write_page_range(struct drm_i915_private *dev_priv,
1632                                   struct i915_page_directory *pd,
1633                                   uint32_t start, uint32_t length)
1634 {
1635         struct i915_page_table *pt;
1636         uint32_t pde, temp;
1637
1638         gen6_for_each_pde(pt, pd, start, length, temp, pde)
1639                 gen6_write_pde(pd, pde, pt);
1640
1641         /* Make sure write is complete before other code can use this page
1642          * table. Also required for WC-mapped PTEs. */
1643         readl(dev_priv->gtt.gsm);
1644 }
1645
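/*
 * A hedged reading of get_pd_offset() below: the value programmed into
 * RING_PP_DIR_BASE appears to be the page directory's GGTT offset expressed
 * in 64-byte cachelines, placed in the upper 16 bits of the register (hence
 * the BUG_ON() enforcing 64-byte alignment).  For example, a page directory
 * at GGTT offset 0x10000 would yield (0x10000 / 64) << 16 == 0x04000000.
 */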
1646 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
1647 {
1648         BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1649
1650         return (ppgtt->pd.base.ggtt_offset / 64) << 16;
1651 }
1652
1653 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1654                          struct drm_i915_gem_request *req)
1655 {
1656         struct intel_engine_cs *ring = req->ring;
1657         int ret;
1658
1659         /* NB: TLBs must be flushed and invalidated before a switch */
1660         ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1661         if (ret)
1662                 return ret;
1663
1664         ret = intel_ring_begin(req, 6);
1665         if (ret)
1666                 return ret;
1667
1668         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1669         intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
1670         intel_ring_emit(ring, PP_DIR_DCLV_2G);
1671         intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
1672         intel_ring_emit(ring, get_pd_offset(ppgtt));
1673         intel_ring_emit(ring, MI_NOOP);
1674         intel_ring_advance(ring);
1675
1676         return 0;
1677 }
1678
1679 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
1680                           struct drm_i915_gem_request *req)
1681 {
1682         struct intel_engine_cs *ring = req->ring;
1683         struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
1684
1685         I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
1686         I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
1687         return 0;
1688 }
1689
1690 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1691                           struct drm_i915_gem_request *req)
1692 {
1693         struct intel_engine_cs *ring = req->ring;
1694         int ret;
1695
1696         /* NB: TLBs must be flushed and invalidated before a switch */
1697         ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1698         if (ret)
1699                 return ret;
1700
1701         ret = intel_ring_begin(req, 6);
1702         if (ret)
1703                 return ret;
1704
1705         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1706         intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
1707         intel_ring_emit(ring, PP_DIR_DCLV_2G);
1708         intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
1709         intel_ring_emit(ring, get_pd_offset(ppgtt));
1710         intel_ring_emit(ring, MI_NOOP);
1711         intel_ring_advance(ring);
1712
1713         /* XXX: RCS is the only one to auto invalidate the TLBs? */
1714         if (ring->id != RCS) {
1715                 ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1716                 if (ret)
1717                         return ret;
1718         }
1719
1720         return 0;
1721 }
1722
1723 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1724                           struct drm_i915_gem_request *req)
1725 {
1726         struct intel_engine_cs *ring = req->ring;
1727         struct drm_device *dev = ppgtt->base.dev;
1728         struct drm_i915_private *dev_priv = dev->dev_private;
1729
1730
1731         I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
1732         I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
1733
1734         POSTING_READ(RING_PP_DIR_DCLV(ring));
1735
1736         return 0;
1737 }
1738
1739 static void gen8_ppgtt_enable(struct drm_device *dev)
1740 {
1741         struct drm_i915_private *dev_priv = dev->dev_private;
1742         struct intel_engine_cs *ring;
1743         int j;
1744
1745         for_each_ring(ring, dev_priv, j) {
1746                 u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
1747                 I915_WRITE(RING_MODE_GEN7(ring),
1748                            _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1749         }
1750 }
1751
1752 static void gen7_ppgtt_enable(struct drm_device *dev)
1753 {
1754         struct drm_i915_private *dev_priv = dev->dev_private;
1755         struct intel_engine_cs *ring;
1756         uint32_t ecochk, ecobits;
1757         int i;
1758
1759         ecobits = I915_READ(GAC_ECO_BITS);
1760         I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1761
1762         ecochk = I915_READ(GAM_ECOCHK);
1763         if (IS_HASWELL(dev)) {
1764                 ecochk |= ECOCHK_PPGTT_WB_HSW;
1765         } else {
1766                 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1767                 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1768         }
1769         I915_WRITE(GAM_ECOCHK, ecochk);
1770
1771         for_each_ring(ring, dev_priv, i) {
1772                 /* GFX_MODE is per-ring on gen7+ */
1773                 I915_WRITE(RING_MODE_GEN7(ring),
1774                            _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1775         }
1776 }
1777
1778 static void gen6_ppgtt_enable(struct drm_device *dev)
1779 {
1780         struct drm_i915_private *dev_priv = dev->dev_private;
1781         uint32_t ecochk, gab_ctl, ecobits;
1782
1783         ecobits = I915_READ(GAC_ECO_BITS);
1784         I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1785                    ECOBITS_PPGTT_CACHE64B);
1786
1787         gab_ctl = I915_READ(GAB_CTL);
1788         I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1789
1790         ecochk = I915_READ(GAM_ECOCHK);
1791         I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1792
1793         I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1794 }
1795
1796 /* PPGTT support for Sandybridge/Gen6 and later */
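/*
 * A small worked example of the index math used by the gen6 clear/insert
 * paths below (illustrative only, assuming 4K pages and GEN6_PTES == 1024):
 *
 *   first_entry = start >> PAGE_SHIFT;      // linear PTE number
 *   act_pt      = first_entry / GEN6_PTES;  // which page table (PDE index)
 *   first_pte   = first_entry % GEN6_PTES;  // entry within that table
 *
 * e.g. start = 6MB gives first_entry = 1536, act_pt = 1, first_pte = 512.
 */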
1797 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1798                                    uint64_t start,
1799                                    uint64_t length,
1800                                    bool use_scratch)
1801 {
1802         struct i915_hw_ppgtt *ppgtt =
1803                 container_of(vm, struct i915_hw_ppgtt, base);
1804         gen6_pte_t *pt_vaddr, scratch_pte;
1805         unsigned first_entry = start >> PAGE_SHIFT;
1806         unsigned num_entries = length >> PAGE_SHIFT;
1807         unsigned act_pt = first_entry / GEN6_PTES;
1808         unsigned first_pte = first_entry % GEN6_PTES;
1809         unsigned last_pte, i;
1810
1811         scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1812                                      I915_CACHE_LLC, true, 0);
1813
1814         while (num_entries) {
1815                 last_pte = first_pte + num_entries;
1816                 if (last_pte > GEN6_PTES)
1817                         last_pte = GEN6_PTES;
1818
1819                 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1820
1821                 for (i = first_pte; i < last_pte; i++)
1822                         pt_vaddr[i] = scratch_pte;
1823
1824                 kunmap_px(ppgtt, pt_vaddr);
1825
1826                 num_entries -= last_pte - first_pte;
1827                 first_pte = 0;
1828                 act_pt++;
1829         }
1830 }
1831
1832 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1833                                       struct sg_table *pages,
1834                                       uint64_t start,
1835                                       enum i915_cache_level cache_level, u32 flags)
1836 {
1837         struct i915_hw_ppgtt *ppgtt =
1838                 container_of(vm, struct i915_hw_ppgtt, base);
1839         gen6_pte_t *pt_vaddr;
1840         unsigned first_entry = start >> PAGE_SHIFT;
1841         unsigned act_pt = first_entry / GEN6_PTES;
1842         unsigned act_pte = first_entry % GEN6_PTES;
1843         struct sg_page_iter sg_iter;
1844
1845         pt_vaddr = NULL;
1846         for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
1847                 if (pt_vaddr == NULL)
1848                         pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1849
1850                 pt_vaddr[act_pte] =
1851                         vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
1852                                        cache_level, true, flags);
1853
1854                 if (++act_pte == GEN6_PTES) {
1855                         kunmap_px(ppgtt, pt_vaddr);
1856                         pt_vaddr = NULL;
1857                         act_pt++;
1858                         act_pte = 0;
1859                 }
1860         }
1861         if (pt_vaddr)
1862                 kunmap_px(ppgtt, pt_vaddr);
1863 }
1864
1865 static int gen6_alloc_va_range(struct i915_address_space *vm,
1866                                uint64_t start_in, uint64_t length_in)
1867 {
1868         DECLARE_BITMAP(new_page_tables, I915_PDES);
1869         struct drm_device *dev = vm->dev;
1870         struct drm_i915_private *dev_priv = dev->dev_private;
1871         struct i915_hw_ppgtt *ppgtt =
1872                                 container_of(vm, struct i915_hw_ppgtt, base);
1873         struct i915_page_table *pt;
1874         uint32_t start, length, start_save, length_save;
1875         uint32_t pde, temp;
1876         int ret;
1877
1878         if (WARN_ON(start_in + length_in > ppgtt->base.total))
1879                 return -ENODEV;
1880
1881         start = start_save = start_in;
1882         length = length_save = length_in;
1883
1884         bitmap_zero(new_page_tables, I915_PDES);
1885
1886         /* The allocation is done in two stages so that we can bail out with
1887          * a minimal amount of pain. The first stage finds new page tables that
1888          * need allocation. The second stage marks the PTEs that are in use
1889          * within those page tables.
1890          */
1891         gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1892                 if (pt != vm->scratch_pt) {
1893                         WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
1894                         continue;
1895                 }
1896
1897                 /* We've already allocated a page table */
1898                 WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
1899
1900                 pt = alloc_pt(dev);
1901                 if (IS_ERR(pt)) {
1902                         ret = PTR_ERR(pt);
1903                         goto unwind_out;
1904                 }
1905
1906                 gen6_initialize_pt(vm, pt);
1907
1908                 ppgtt->pd.page_table[pde] = pt;
1909                 __set_bit(pde, new_page_tables);
1910                 trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
1911         }
1912
1913         start = start_save;
1914         length = length_save;
1915
1916         gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1917                 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
1918
1919                 bitmap_zero(tmp_bitmap, GEN6_PTES);
1920                 bitmap_set(tmp_bitmap, gen6_pte_index(start),
1921                            gen6_pte_count(start, length));
1922
1923                 if (__test_and_clear_bit(pde, new_page_tables))
1924                         gen6_write_pde(&ppgtt->pd, pde, pt);
1925
1926                 trace_i915_page_table_entry_map(vm, pde, pt,
1927                                          gen6_pte_index(start),
1928                                          gen6_pte_count(start, length),
1929                                          GEN6_PTES);
1930                 bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
1931                                 GEN6_PTES);
1932         }
1933
1934         WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
1935
1936         /* Make sure write is complete before other code can use this page
1937          * table. Also required for WC-mapped PTEs. */
1938         readl(dev_priv->gtt.gsm);
1939
1940         mark_tlbs_dirty(ppgtt);
1941         return 0;
1942
1943 unwind_out:
1944         for_each_set_bit(pde, new_page_tables, I915_PDES) {
1945                 struct i915_page_table *pt = ppgtt->pd.page_table[pde];
1946
1947                 ppgtt->pd.page_table[pde] = vm->scratch_pt;
1948                 free_pt(vm->dev, pt);
1949         }
1950
1951         mark_tlbs_dirty(ppgtt);
1952         return ret;
1953 }
1954
1955 static int gen6_init_scratch(struct i915_address_space *vm)
1956 {
1957         struct drm_device *dev = vm->dev;
1958
1959         vm->scratch_page = alloc_scratch_page(dev);
1960         if (IS_ERR(vm->scratch_page))
1961                 return PTR_ERR(vm->scratch_page);
1962
1963         vm->scratch_pt = alloc_pt(dev);
1964         if (IS_ERR(vm->scratch_pt)) {
1965                 free_scratch_page(dev, vm->scratch_page);
1966                 return PTR_ERR(vm->scratch_pt);
1967         }
1968
1969         gen6_initialize_pt(vm, vm->scratch_pt);
1970
1971         return 0;
1972 }
1973
1974 static void gen6_free_scratch(struct i915_address_space *vm)
1975 {
1976         struct drm_device *dev = vm->dev;
1977
1978         free_pt(dev, vm->scratch_pt);
1979         free_scratch_page(dev, vm->scratch_page);
1980 }
1981
1982 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1983 {
1984         struct i915_hw_ppgtt *ppgtt =
1985                 container_of(vm, struct i915_hw_ppgtt, base);
1986         struct i915_page_table *pt;
1987         uint32_t pde;
1988
1989         drm_mm_remove_node(&ppgtt->node);
1990
1991         gen6_for_all_pdes(pt, ppgtt, pde) {
1992                 if (pt != vm->scratch_pt)
1993                         free_pt(ppgtt->base.dev, pt);
1994         }
1995
1996         gen6_free_scratch(vm);
1997 }
1998
1999 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
2000 {
2001         struct i915_address_space *vm = &ppgtt->base;
2002         struct drm_device *dev = ppgtt->base.dev;
2003         struct drm_i915_private *dev_priv = dev->dev_private;
2004         bool retried = false;
2005         int ret;
2006
2007         /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
2008          * allocator works in address space sizes, so it's multiplied by page
2009          * size. We allocate at the top of the GTT to avoid fragmentation.
2010          */
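        /*
         * A hedged illustration of the sizes involved: 512 PDEs of 4 bytes
         * are only 2KB of data, but because the PDEs are read through GGTT
         * PTE slots (one slot per GGTT page), the reservation below is
         * presumably I915_PDES * PAGE_SIZE = 512 * 4096 = 2MB of GGTT address
         * space (GEN6_PD_SIZE), aligned to GEN6_PD_ALIGN.
         */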
2011         BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
2012
2013         ret = gen6_init_scratch(vm);
2014         if (ret)
2015                 return ret;
2016
2017 alloc:
2018         ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
2019                                                   &ppgtt->node, GEN6_PD_SIZE,
2020                                                   GEN6_PD_ALIGN, 0,
2021                                                   0, dev_priv->gtt.base.total,
2022                                                   DRM_MM_TOPDOWN);
2023         if (ret == -ENOSPC && !retried) {
2024                 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
2025                                                GEN6_PD_SIZE, GEN6_PD_ALIGN,
2026                                                I915_CACHE_NONE,
2027                                                0, dev_priv->gtt.base.total,
2028                                                0);
2029                 if (ret)
2030                         goto err_out;
2031
2032                 retried = true;
2033                 goto alloc;
2034         }
2035
2036         if (ret)
2037                 goto err_out;
2038
2039
2040         if (ppgtt->node.start < dev_priv->gtt.mappable_end)
2041                 DRM_DEBUG("Forced to use aperture for PDEs\n");
2042
2043         return 0;
2044
2045 err_out:
2046         gen6_free_scratch(vm);
2047         return ret;
2048 }
2049
2050 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2051 {
2052         return gen6_ppgtt_allocate_page_directories(ppgtt);
2053 }
2054
2055 static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2056                                   uint64_t start, uint64_t length)
2057 {
2058         struct i915_page_table *unused;
2059         uint32_t pde, temp;
2060
2061         gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
2062                 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
2063 }
2064
2065 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
2066 {
2067         struct drm_device *dev = ppgtt->base.dev;
2068         struct drm_i915_private *dev_priv = dev->dev_private;
2069         int ret;
2070
2071         ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
2072         if (IS_GEN6(dev)) {
2073                 ppgtt->switch_mm = gen6_mm_switch;
2074         } else if (IS_HASWELL(dev)) {
2075                 ppgtt->switch_mm = hsw_mm_switch;
2076         } else if (IS_GEN7(dev)) {
2077                 ppgtt->switch_mm = gen7_mm_switch;
2078         } else
2079                 BUG();
2080
2081         if (intel_vgpu_active(dev))
2082                 ppgtt->switch_mm = vgpu_mm_switch;
2083
2084         ret = gen6_ppgtt_alloc(ppgtt);
2085         if (ret)
2086                 return ret;
2087
2088         ppgtt->base.allocate_va_range = gen6_alloc_va_range;
2089         ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2090         ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
2091         ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2092         ppgtt->base.bind_vma = ppgtt_bind_vma;
2093         ppgtt->base.cleanup = gen6_ppgtt_cleanup;
2094         ppgtt->base.start = 0;
2095         ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
2096         ppgtt->debug_dump = gen6_dump_ppgtt;
2097
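        /*
         * The two assignments below follow from the PDEs living in GGTT PTE
         * slots (see gen6_ppgtt_allocate_page_directories()): each GGTT page
         * owns one gen6_pte_t slot in the gsm mapping, so the slot for GGTT
         * address node.start sits at byte offset
         * node.start / PAGE_SIZE * sizeof(gen6_pte_t), and pd_addr points at
         * that first slot.  This is an interpretation of the code, not a
         * statement taken from the hardware documentation.
         */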
2098         ppgtt->pd.base.ggtt_offset =
2099                 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
2100
2101         ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
2102                 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
2103
2104         gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
2105
2106         gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
2107
2108         DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
2109                          ppgtt->node.size >> 20,
2110                          ppgtt->node.start / PAGE_SIZE);
2111
2112         DRM_DEBUG("Adding PPGTT at offset %x\n",
2113                   ppgtt->pd.base.ggtt_offset << 10);
2114
2115         return 0;
2116 }
2117
2118 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2119 {
2120         ppgtt->base.dev = dev;
2121
2122         if (INTEL_INFO(dev)->gen < 8)
2123                 return gen6_ppgtt_init(ppgtt);
2124         else
2125                 return gen8_ppgtt_init(ppgtt);
2126 }
2127
2128 static void i915_address_space_init(struct i915_address_space *vm,
2129                                     struct drm_i915_private *dev_priv)
2130 {
2131         drm_mm_init(&vm->mm, vm->start, vm->total);
2132         vm->dev = dev_priv->dev;
2133         INIT_LIST_HEAD(&vm->active_list);
2134         INIT_LIST_HEAD(&vm->inactive_list);
2135         list_add_tail(&vm->global_link, &dev_priv->vm_list);
2136 }
2137
2138 static void gtt_write_workarounds(struct drm_device *dev)
2139 {
2140         struct drm_i915_private *dev_priv = dev->dev_private;
2141
2142         /* This function is for GTT-related workarounds. It is called on driver
2143          * load and after a GPU reset, so you can place workarounds here even if
2144          * they get overwritten by a GPU reset.
2145          */
2146         /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
2147         if (IS_BROADWELL(dev))
2148                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2149         else if (IS_CHERRYVIEW(dev))
2150                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2151         else if (IS_SKYLAKE(dev))
2152                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2153         else if (IS_BROXTON(dev))
2154                 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2155 }
2156
2157 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2158 {
2159         struct drm_i915_private *dev_priv = dev->dev_private;
2160         int ret = 0;
2161
2162         ret = __hw_ppgtt_init(dev, ppgtt);
2163         if (ret == 0) {
2164                 kref_init(&ppgtt->ref);
2165                 i915_address_space_init(&ppgtt->base, dev_priv);
2166         }
2167
2168         return ret;
2169 }
2170
2171 int i915_ppgtt_init_hw(struct drm_device *dev)
2172 {
2173         gtt_write_workarounds(dev);
2174
2175         /* In the case of execlists, PPGTT is enabled by the context descriptor
2176          * and the PDPs are contained within the context itself.  We don't
2177          * need to do anything here. */
2178         if (i915.enable_execlists)
2179                 return 0;
2180
2181         if (!USES_PPGTT(dev))
2182                 return 0;
2183
2184         if (IS_GEN6(dev))
2185                 gen6_ppgtt_enable(dev);
2186         else if (IS_GEN7(dev))
2187                 gen7_ppgtt_enable(dev);
2188         else if (INTEL_INFO(dev)->gen >= 8)
2189                 gen8_ppgtt_enable(dev);
2190         else
2191                 MISSING_CASE(INTEL_INFO(dev)->gen);
2192
2193         return 0;
2194 }
2195
2196 int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
2197 {
2198         struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
2199         struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2200
2201         if (i915.enable_execlists)
2202                 return 0;
2203
2204         if (!ppgtt)
2205                 return 0;
2206
2207         return ppgtt->switch_mm(ppgtt, req);
2208 }
2209
2210 struct i915_hw_ppgtt *
2211 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
2212 {
2213         struct i915_hw_ppgtt *ppgtt;
2214         int ret;
2215
2216         ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2217         if (!ppgtt)
2218                 return ERR_PTR(-ENOMEM);
2219
2220         ret = i915_ppgtt_init(dev, ppgtt);
2221         if (ret) {
2222                 kfree(ppgtt);
2223                 return ERR_PTR(ret);
2224         }
2225
2226         ppgtt->file_priv = fpriv;
2227
2228         trace_i915_ppgtt_create(&ppgtt->base);
2229
2230         return ppgtt;
2231 }
2232
2233 void  i915_ppgtt_release(struct kref *kref)
2234 {
2235         struct i915_hw_ppgtt *ppgtt =
2236                 container_of(kref, struct i915_hw_ppgtt, ref);
2237
2238         trace_i915_ppgtt_release(&ppgtt->base);
2239
2240         /* vmas should already be unbound */
2241         WARN_ON(!list_empty(&ppgtt->base.active_list));
2242         WARN_ON(!list_empty(&ppgtt->base.inactive_list));
2243
2244         list_del(&ppgtt->base.global_link);
2245         drm_mm_takedown(&ppgtt->base.mm);
2246
2247         ppgtt->base.cleanup(&ppgtt->base);
2248         kfree(ppgtt);
2249 }
2250
2251 extern int intel_iommu_gfx_mapped;
2252 /* Certain Gen5 chipsets require idling the GPU before
2253  * unmapping anything from the GTT when VT-d is enabled.
2254  */
2255 static bool needs_idle_maps(struct drm_device *dev)
2256 {
2257 #ifdef CONFIG_INTEL_IOMMU
2258         /* Query intel_iommu to see if we need the workaround. Presumably that
2259          * was loaded first.
2260          */
2261         if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
2262                 return true;
2263 #endif
2264         return false;
2265 }
2266
2267 static bool do_idling(struct drm_i915_private *dev_priv)
2268 {
2269         bool ret = dev_priv->mm.interruptible;
2270
2271         if (unlikely(dev_priv->gtt.do_idle_maps)) {
2272                 dev_priv->mm.interruptible = false;
2273                 if (i915_gpu_idle(dev_priv->dev)) {
2274                         DRM_ERROR("Couldn't idle GPU\n");
2275                         /* Wait a bit, in hopes it avoids the hang */
2276                         udelay(10);
2277                 }
2278         }
2279
2280         return ret;
2281 }
2282
2283 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
2284 {
2285         if (unlikely(dev_priv->gtt.do_idle_maps))
2286                 dev_priv->mm.interruptible = interruptible;
2287 }
2288
2289 void i915_check_and_clear_faults(struct drm_device *dev)
2290 {
2291         struct drm_i915_private *dev_priv = dev->dev_private;
2292         struct intel_engine_cs *ring;
2293         int i;
2294
2295         if (INTEL_INFO(dev)->gen < 6)
2296                 return;
2297
2298         for_each_ring(ring, dev_priv, i) {
2299                 u32 fault_reg;
2300                 fault_reg = I915_READ(RING_FAULT_REG(ring));
2301                 if (fault_reg & RING_FAULT_VALID) {
2302 #if 0
2303                         DRM_DEBUG_DRIVER("Unexpected fault\n"
2304                                          "\tAddr: 0x%08lx\n"
2305                                          "\tAddress space: %s\n"
2306                                          "\tSource ID: %d\n"
2307                                          "\tType: %d\n",
2308                                          fault_reg & PAGE_MASK,
2309                                          fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2310                                          RING_FAULT_SRCID(fault_reg),
2311                                          RING_FAULT_FAULT_TYPE(fault_reg));
2312 #endif
2313                         I915_WRITE(RING_FAULT_REG(ring),
2314                                    fault_reg & ~RING_FAULT_VALID);
2315                 }
2316         }
2317         POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
2318 }
2319
2320 static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
2321 {
2322         if (INTEL_INFO(dev_priv->dev)->gen < 6) {
2323                 intel_gtt_chipset_flush();
2324         } else {
2325                 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2326                 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2327         }
2328 }
2329
2330 void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
2331 {
2332         struct drm_i915_private *dev_priv = dev->dev_private;
2333
2334         /* Don't bother messing with faults pre GEN6 as we have little
2335          * documentation supporting that it's a good idea.
2336          */
2337         if (INTEL_INFO(dev)->gen < 6)
2338                 return;
2339
2340         i915_check_and_clear_faults(dev);
2341
2342         dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
2343                                        dev_priv->gtt.base.start,
2344                                        dev_priv->gtt.base.total,
2345                                        true);
2346
2347         i915_ggtt_flush(dev_priv);
2348 }
2349
2350 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
2351 {
2352         if (!dma_map_sg(&obj->base.dev->pdev->dev,
2353                         obj->pages->sgl, obj->pages->nents,
2354                         PCI_DMA_BIDIRECTIONAL))
2355                 return -ENOSPC;
2356
2357         return 0;
2358 }
2359
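/*
 * gen8 GGTT PTEs are 64 bits wide; gen8_set_pte() writes them with a single
 * writeq() when available, otherwise as two 32-bit iowrites (low dword at
 * addr, high dword at addr + 4).  The non-atomic fallback is presumably
 * acceptable because the GGTT update paths below finish with a posting read
 * and an explicit GFX_FLSH_CNTL flush before the new mappings are relied on.
 */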
2360 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2361 {
2362 #ifdef writeq
2363         writeq(pte, addr);
2364 #else
2365         iowrite32((u32)pte, addr);
2366         iowrite32(pte >> 32, addr + 4);
2367 #endif
2368 }
2369
2370 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2371                                      struct sg_table *st,
2372                                      uint64_t start,
2373                                      enum i915_cache_level level, u32 unused)
2374 {
2375         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2376         unsigned first_entry = start >> PAGE_SHIFT;
2377         gen8_pte_t __iomem *gtt_entries =
2378                 (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
2379         int i = 0;
2380         struct sg_page_iter sg_iter;
2381         dma_addr_t addr = 0; /* shut up gcc */
2382         int rpm_atomic_seq;
2383
2384         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2385
2386         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
2387                 addr = sg_dma_address(sg_iter.sg) +
2388                         (sg_iter.sg_pgoffset << PAGE_SHIFT);
2389                 gen8_set_pte(&gtt_entries[i],
2390                              gen8_pte_encode(addr, level, true));
2391                 i++;
2392         }
2393
2394         /*
2395          * XXX: This serves as a posting read to make sure that the PTE has
2396          * actually been updated. There is some concern that, even though the
2397          * registers and PTEs are within the same BAR, they may be subject to
2398          * different access patterns (NUMA effects). Therefore, even with the way
2399          * we assume the hardware should work, we must keep this posting read for paranoia.
2400          */
2401         if (i != 0)
2402                 WARN_ON(readq(&gtt_entries[i-1])
2403                         != gen8_pte_encode(addr, level, true));
2404
2405         /* This next bit makes the above posting read even more important. We
2406          * want to flush the TLBs only after we're certain all the PTE updates
2407          * have finished.
2408          */
2409         I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2410         POSTING_READ(GFX_FLSH_CNTL_GEN6);
2411
2412         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2413 }
2414
2415 struct insert_entries {
2416         struct i915_address_space *vm;
2417         struct sg_table *st;
2418         uint64_t start;
2419         enum i915_cache_level level;
2420         u32 flags;
2421 };
2422
2423 static int gen8_ggtt_insert_entries__cb(void *_arg)
2424 {
2425         struct insert_entries *arg = _arg;
2426         gen8_ggtt_insert_entries(arg->vm, arg->st,
2427                                  arg->start, arg->level, arg->flags);
2428         return 0;
2429 }
2430
2431 static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2432                                           struct sg_table *st,
2433                                           uint64_t start,
2434                                           enum i915_cache_level level,
2435                                           u32 flags)
2436 {
2437         struct insert_entries arg = { vm, st, start, level, flags };
2438 #ifndef __DragonFly__
2439         stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
2440 #else
2441         /* XXX: is this enough ?
2442          * See Linux commit 5bab6f60cb4d1417ad7c599166bcfec87529c1a2 */
2443         get_mplock();
2444         gen8_ggtt_insert_entries__cb(&arg);
2445         rel_mplock();
2446 #endif
2447 }
2448
2449 /*
2450  * Binds an object into the global gtt with the specified cache level. The object
2451  * will be accessible to the GPU via commands whose operands reference offsets
2452  * within the global GTT as well as accessible by the GPU through the GMADR
2453  * mapped BAR (dev_priv->mm.gtt->gtt).
2454  */
2455 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2456                                      struct sg_table *st,
2457                                      uint64_t start,
2458                                      enum i915_cache_level level, u32 flags)
2459 {
2460         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2461         unsigned first_entry = start >> PAGE_SHIFT;
2462         gen6_pte_t __iomem *gtt_entries =
2463                 (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
2464         int i = 0;
2465         struct sg_page_iter sg_iter;
2466         dma_addr_t addr = 0;
2467         int rpm_atomic_seq;
2468
2469         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2470
2471         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
2472                 addr = sg_page_iter_dma_address(&sg_iter);
2473                 iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
2474                 i++;
2475         }
2476
2477         /* XXX: This serves as a posting read to make sure that the PTE has
2478          * actually been updated. There is some concern that, even though the
2479          * registers and PTEs are within the same BAR, they may be subject to
2480          * different access patterns (NUMA effects). Therefore, even with the way
2481          * we assume the hardware should work, we must keep this posting read for paranoia.
2482          */
2483         if (i != 0) {
2484                 unsigned long gtt = readl(&gtt_entries[i-1]);
2485                 WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
2486         }
2487
2488         /* This next bit makes the above posting read even more important. We
2489          * want to flush the TLBs only after we're certain all the PTE updates
2490          * have finished.
2491          */
2492         I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2493         POSTING_READ(GFX_FLSH_CNTL_GEN6);
2494
2495         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2496 }
2497
2498 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2499                                   uint64_t start,
2500                                   uint64_t length,
2501                                   bool use_scratch)
2502 {
2503         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2504         unsigned first_entry = start >> PAGE_SHIFT;
2505         unsigned num_entries = length >> PAGE_SHIFT;
2506         gen8_pte_t scratch_pte, __iomem *gtt_base =
2507                 (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
2508         const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
2509         int i;
2510         int rpm_atomic_seq;
2511
2512         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2513
2514         if (WARN(num_entries > max_entries,
2515                  "First entry = %d; Num entries = %d (max=%d)\n",
2516                  first_entry, num_entries, max_entries))
2517                 num_entries = max_entries;
2518
2519         scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
2520                                       I915_CACHE_LLC,
2521                                       use_scratch);
2522         for (i = 0; i < num_entries; i++)
2523                 gen8_set_pte(&gtt_base[i], scratch_pte);
2524         readl(gtt_base);
2525
2526         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2527 }
2528
2529 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2530                                   uint64_t start,
2531                                   uint64_t length,
2532                                   bool use_scratch)
2533 {
2534         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2535         unsigned first_entry = start >> PAGE_SHIFT;
2536         unsigned num_entries = length >> PAGE_SHIFT;
2537         gen6_pte_t scratch_pte, __iomem *gtt_base =
2538                 (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
2539         const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
2540         int i;
2541         int rpm_atomic_seq;
2542
2543         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2544
2545         if (WARN(num_entries > max_entries,
2546                  "First entry = %d; Num entries = %d (max=%d)\n",
2547                  first_entry, num_entries, max_entries))
2548                 num_entries = max_entries;
2549
2550         scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
2551                                      I915_CACHE_LLC, use_scratch, 0);
2552
2553         for (i = 0; i < num_entries; i++)
2554                 iowrite32(scratch_pte, &gtt_base[i]);
2555         readl(gtt_base);
2556
2557         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2558 }
2559
2560 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2561                                      struct sg_table *pages,
2562                                      uint64_t start,
2563                                      enum i915_cache_level cache_level, u32 unused)
2564 {
2565         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2566         unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2567                 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2568         int rpm_atomic_seq;
2569
2570         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2571
2572         intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
2573
2574         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2575
2576 }
2577
2578 static void i915_ggtt_clear_range(struct i915_address_space *vm,
2579                                   uint64_t start,
2580                                   uint64_t length,
2581                                   bool unused)
2582 {
2583         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2584         unsigned first_entry = start >> PAGE_SHIFT;
2585         unsigned num_entries = length >> PAGE_SHIFT;
2586         int rpm_atomic_seq;
2587
2588         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2589
2590         intel_gtt_clear_range(first_entry, num_entries);
2591
2592         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2593 }
2594
2595 static int ggtt_bind_vma(struct i915_vma *vma,
2596                          enum i915_cache_level cache_level,
2597                          u32 flags)
2598 {
2599         struct drm_i915_gem_object *obj = vma->obj;
2600         u32 pte_flags = 0;
2601         int ret;
2602
2603         ret = i915_get_ggtt_vma_pages(vma);
2604         if (ret)
2605                 return ret;
2606
2607         /* Currently applicable only to VLV */
2608         if (obj->gt_ro)
2609                 pte_flags |= PTE_READ_ONLY;
2610
2611         vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
2612                                 vma->node.start,
2613                                 cache_level, pte_flags);
2614
2615         /*
2616          * Without aliasing PPGTT there's no difference between
2617          * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2618          * upgrade to both bound if we bind either to avoid double-binding.
2619          */
2620         vma->bound |= GLOBAL_BIND | LOCAL_BIND;
2621
2622         return 0;
2623 }
2624
2625 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2626                                  enum i915_cache_level cache_level,
2627                                  u32 flags)
2628 {
2629         struct drm_device *dev = vma->vm->dev;
2630         struct drm_i915_private *dev_priv = dev->dev_private;
2631         struct drm_i915_gem_object *obj = vma->obj;
2632         struct sg_table *pages = obj->pages;
2633         u32 pte_flags = 0;
2634         int ret;
2635
2636         ret = i915_get_ggtt_vma_pages(vma);
2637         if (ret)
2638                 return ret;
2639         pages = vma->ggtt_view.pages;
2640
2641         /* Currently applicable only to VLV */
2642         if (obj->gt_ro)
2643                 pte_flags |= PTE_READ_ONLY;
2644
2645
2646         if (flags & GLOBAL_BIND) {
2647                 vma->vm->insert_entries(vma->vm, pages,
2648                                         vma->node.start,
2649                                         cache_level, pte_flags);
2650         }
2651
2652         if (flags & LOCAL_BIND) {
2653                 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
2654                 appgtt->base.insert_entries(&appgtt->base, pages,
2655                                             vma->node.start,
2656                                             cache_level, pte_flags);
2657         }
2658
2659         return 0;
2660 }
2661
2662 static void ggtt_unbind_vma(struct i915_vma *vma)
2663 {
2664         struct drm_device *dev = vma->vm->dev;
2665         struct drm_i915_private *dev_priv = dev->dev_private;
2666         struct drm_i915_gem_object *obj = vma->obj;
2667         const uint64_t size = min_t(uint64_t,
2668                                     obj->base.size,
2669                                     vma->node.size);
2670
2671         if (vma->bound & GLOBAL_BIND) {
2672                 vma->vm->clear_range(vma->vm,
2673                                      vma->node.start,
2674                                      size,
2675                                      true);
2676         }
2677
2678         if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
2679                 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
2680
2681                 appgtt->base.clear_range(&appgtt->base,
2682                                          vma->node.start,
2683                                          size,
2684                                          true);
2685         }
2686 }
2687
2688 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
2689 {
2690         struct drm_device *dev = obj->base.dev;
2691         struct drm_i915_private *dev_priv = dev->dev_private;
2692         bool interruptible;
2693
2694         interruptible = do_idling(dev_priv);
2695
2696         dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
2697                      PCI_DMA_BIDIRECTIONAL);
2698
2699         undo_idling(dev_priv, interruptible);
2700 }
2701
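/*
 * A short note on the colouring callback below: node->color carries the
 * cache level of the occupying object, and whenever two neighbouring nodes
 * would have different colours the allocator is asked to leave a 4096-byte
 * gap between them.  The guard page presumably keeps objects with different
 * cacheability from sharing a prefetch boundary; the hook is only installed
 * when !HAS_LLC(dev), see i915_gem_setup_global_gtt() below.
 */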
2702 static void i915_gtt_color_adjust(struct drm_mm_node *node,
2703                                   unsigned long color,
2704                                   u64 *start,
2705                                   u64 *end)
2706 {
2707         if (node->color != color)
2708                 *start += 4096;
2709
2710         if (!list_empty(&node->node_list)) {
2711                 node = list_entry(node->node_list.next,
2712                                   struct drm_mm_node,
2713                                   node_list);
2714                 if (node->allocated && node->color != color)
2715                         *end -= 4096;
2716         }
2717 }
2718
2719 static int i915_gem_setup_global_gtt(struct drm_device *dev,
2720                                      u64 start,
2721                                      u64 mappable_end,
2722                                      u64 end)
2723 {
2724         /* Let GEM manage all of the aperture.
2725          *
2726          * However, leave one page at the end still bound to the scratch page.
2727          * There are a number of places where the hardware apparently prefetches
2728          * past the end of the object, and we've seen multiple hangs with the
2729          * GPU head pointer stuck in a batchbuffer bound at the last page of the
2730          * aperture.  One page should be enough to keep any prefetching inside
2731          * of the aperture.
2732          */
2733         struct drm_i915_private *dev_priv = dev->dev_private;
2734         struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
2735         struct drm_mm_node *entry;
2736         struct drm_i915_gem_object *obj;
2737         unsigned long hole_start, hole_end;
2738         int ret;
2739         unsigned long mappable;
2740         int error;
2741
2742         mappable = min(end, mappable_end) - start;
2743         BUG_ON(mappable_end > end);
2744
2745         ggtt_vm->start = start;
2746
2747         /* Subtract the guard page before address space initialization to
2748          * shrink the range used by drm_mm */
2749         ggtt_vm->total = end - start - PAGE_SIZE;
2750         i915_address_space_init(ggtt_vm, dev_priv);
2751         ggtt_vm->total += PAGE_SIZE;
2752
2753         if (intel_vgpu_active(dev)) {
2754                 ret = intel_vgt_balloon(dev);
2755                 if (ret)
2756                         return ret;
2757         }
2758
2759         if (!HAS_LLC(dev))
2760                 ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
2761
2762         /* Mark any preallocated objects as occupied */
2763         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
2764                 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
2765
2766                 DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
2767                               i915_gem_obj_ggtt_offset(obj), obj->base.size);
2768
2769                 WARN_ON(i915_gem_obj_ggtt_bound(obj));
2770                 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
2771                 if (ret) {
2772                         DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
2773                         return ret;
2774                 }
2775                 vma->bound |= GLOBAL_BIND;
2776                 __i915_vma_set_map_and_fenceable(vma);
2777                 list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
2778         }
2779
2780         /* Clear any non-preallocated blocks */
2781         drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
2782                 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2783                               hole_start, hole_end);
2784                 ggtt_vm->clear_range(ggtt_vm, hole_start,
2785                                      hole_end - hole_start, true);
2786         }
2787
2788 #ifdef __DragonFly__
2789         device_printf(dev->dev->bsddev,
2790             "taking over the fictitious range 0x%llx-0x%llx\n",
2791             dev_priv->gtt.mappable_base + start, dev_priv->gtt.mappable_base + start + mappable);
2792         error = -vm_phys_fictitious_reg_range(dev_priv->gtt.mappable_base + start,
2793             dev_priv->gtt.mappable_base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
2794 #endif
2795
2796         /* And finally clear the reserved guard page */
2797         ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
2798
2799         if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
2800                 struct i915_hw_ppgtt *ppgtt;
2801
2802                 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2803                 if (!ppgtt)
2804                         return -ENOMEM;
2805
2806                 ret = __hw_ppgtt_init(dev, ppgtt);
2807                 if (ret) {
2808                         ppgtt->base.cleanup(&ppgtt->base);
2809                         kfree(ppgtt);
2810                         return ret;
2811                 }
2812
2813                 if (ppgtt->base.allocate_va_range)
2814                         ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
2815                                                             ppgtt->base.total);
2816                 if (ret) {
2817                         ppgtt->base.cleanup(&ppgtt->base);
2818                         kfree(ppgtt);
2819                         return ret;
2820                 }
2821
2822                 ppgtt->base.clear_range(&ppgtt->base,
2823                                         ppgtt->base.start,
2824                                         ppgtt->base.total,
2825                                         true);
2826
2827                 dev_priv->mm.aliasing_ppgtt = ppgtt;
2828                 WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
2829                 dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
2830         }
2831
2832         return 0;
2833 }
2834
2835 void i915_gem_init_global_gtt(struct drm_device *dev)
2836 {
2837         struct drm_i915_private *dev_priv = dev->dev_private;
2838         u64 gtt_size, mappable_size;
2839
2840         gtt_size = dev_priv->gtt.base.total;
2841         mappable_size = dev_priv->gtt.mappable_end;
2842
2843         i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
2844 }
2845
2846 void i915_global_gtt_cleanup(struct drm_device *dev)
2847 {
2848         struct drm_i915_private *dev_priv = dev->dev_private;
2849         struct i915_address_space *vm = &dev_priv->gtt.base;
2850
2851         if (dev_priv->mm.aliasing_ppgtt) {
2852                 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2853
2854                 ppgtt->base.cleanup(&ppgtt->base);
2855         }
2856
2857         i915_gem_cleanup_stolen(dev);
2858
2859         if (drm_mm_initialized(&vm->mm)) {
2860                 if (intel_vgpu_active(dev))
2861                         intel_vgt_deballoon();
2862
2863                 drm_mm_takedown(&vm->mm);
2864                 list_del(&vm->global_link);
2865         }
2866
2867         vm->cleanup(vm);
2868 }
2869
2870 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2871 {
2872         snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2873         snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2874         return snb_gmch_ctl << 20;
2875 }
2876
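/*
 * A rough decode of the BDW GGMS field handled below, assuming 8-byte gen8
 * PTEs and 4K pages: a non-zero field value n selects (1 << n) MB of GTT
 * entry space, e.g. n = 3 gives 8MB of PTEs, i.e. 8MB / 8B * 4KB = 4GB of
 * GGTT.  The CONFIG_X86_32 clamp to 4MB of entries then corresponds to the
 * 2GB limit mentioned in the in-function comment.
 */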
2877 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2878 {
2879         bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2880         bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2881         if (bdw_gmch_ctl)
2882                 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2883
2884 #ifdef CONFIG_X86_32
2885         /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2886         if (bdw_gmch_ctl > 4)
2887                 bdw_gmch_ctl = 4;
2888 #endif
2889
2890         return bdw_gmch_ctl << 20;
2891 }
2892
2893 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2894 {
2895         gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2896         gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2897
2898         if (gmch_ctrl)
2899                 return 1 << (20 + gmch_ctrl);
2900
2901         return 0;
2902 }
2903
2904 static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
2905 {
2906         snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2907         snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
2908         return snb_gmch_ctl << 25; /* 32 MB units */
2909 }
2910
2911 static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
2912 {
2913         bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2914         bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
2915         return bdw_gmch_ctl << 25; /* 32 MB units */
2916 }
2917
2918 static size_t chv_get_stolen_size(u16 gmch_ctrl)
2919 {
2920         gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2921         gmch_ctrl &= SNB_GMCH_GMS_MASK;
2922
2923         /*
2924          * 0x0  to 0x10: 32MB increments starting at 0MB
2925          * 0x11 to 0x16: 4MB increments starting at 8MB
2926          * 0x17 to 0x1d: 4MB increments starting at 36MB
2927          */
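        /*
         * A few worked examples of the decode below (illustrative only):
         *   gmch_ctrl = 0x10 -> 0x10 << 25              = 512MB
         *   gmch_ctrl = 0x11 -> (0x11 - 0x11 + 2) << 22 =   8MB
         *   gmch_ctrl = 0x17 -> (0x17 - 0x17 + 9) << 22 =  36MB
         * which matches the increments listed above.
         */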
2928         if (gmch_ctrl < 0x11)
2929                 return gmch_ctrl << 25;
2930         else if (gmch_ctrl < 0x17)
2931                 return (gmch_ctrl - 0x11 + 2) << 22;
2932         else
2933                 return (gmch_ctrl - 0x17 + 9) << 22;
2934 }
2935
2936 static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2937 {
2938         gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2939         gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2940
2941         if (gen9_gmch_ctl < 0xf0)
2942                 return gen9_gmch_ctl << 25; /* 32 MB units */
2943         else
2944                 /* 4MB increments, with 0xf0 corresponding to 4MB */
2945                 return (gen9_gmch_ctl - 0xf0 + 1) << 22;
2946 }
2947
2948 static int ggtt_probe_common(struct drm_device *dev,
2949                              size_t gtt_size)
2950 {
2951         struct drm_i915_private *dev_priv = dev->dev_private;
2952         struct i915_page_scratch *scratch_page;
2953         phys_addr_t gtt_phys_addr;
2954
2955         /* For Modern GENs the PTEs and register space are split in the BAR */
2956         gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
2957                 (pci_resource_len(dev->pdev, 0) / 2);
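        /*
         * Illustrative: if BAR 0 were 16MB (a hypothetical size, not taken
         * from the spec), the registers would occupy the first 8MB and the
         * GTT page table (GSM) the second 8MB, which is what the
         * start + len/2 computation above selects.
         */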
2958
2959         /*
2960          * On BXT, writes larger than 64 bits to the GTT pagetable range are
2961          * dropped. WC mappings in general produce 64-byte burst writes when
2962          * the WC buffer is flushed, so we can't use WC here and have to
2963          * resort to an uncached mapping instead. The WC issue is easily
2964          * caught by the readback check when writing GTT PTE entries.
2965          */
2966         if (IS_BROXTON(dev))
2967                 dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
2968         else
2969                 dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
2970         if (!dev_priv->gtt.gsm) {
2971                 DRM_ERROR("Failed to map the gtt page table\n");
2972                 return -ENOMEM;
2973         }
2974
2975         scratch_page = alloc_scratch_page(dev);
2976         if (IS_ERR(scratch_page)) {
2977                 DRM_ERROR("Scratch setup failed\n");
2978                 /* iounmap will also get called at remove, but meh */
2979                 iounmap(dev_priv->gtt.gsm);
2980                 return PTR_ERR(scratch_page);
2981         }
2982
2983         dev_priv->gtt.base.scratch_page = scratch_page;
2984
2985         return 0;
2986 }
2987
2988 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2989  * bits. When using advanced contexts each context stores its own PAT, but
2990  * writing this data shouldn't be harmful even in those cases. */
2991 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
2992 {
2993         uint64_t pat;
2994
2995         pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
2996               GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2997               GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2998               GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
2999               GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
3000               GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
3001               GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
3002               GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
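        /*
         * Layout sketch (assuming the usual GEN8_PPAT(i, x) = x << (i * 8)
         * packing): the encoding for PAT index i sits in byte i of the
         * 64-bit value, so the low 32 bits hold entries 0-3 and the high
         * 32 bits entries 4-7, matching the LO/HI register writes at the
         * end of this function.
         */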
3003
3004         if (!USES_PPGTT(dev_priv->dev))
3005                 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3006                  * so RTL will always use the value corresponding to
3007                  * pat_sel = 000".
3008                  * So let's disable caching for the GGTT to avoid screen
3009                  * corruption. MOCS can still be used though.
3010                  * - System agent GGTT writes (i.e. CPU GTT mmaps) already
3011                  * worked before this patch, i.e. the same uncached + snooping
3012                  * access as on gen6/7 appears to be in effect.
3013                  * - So this just fixes blitter/render access. Again it looks
3014                  * like it's not just uncached access, but uncached + snooping,
3015                  * so we can still hold onto all our assumptions wrt CPU
3016                  * clflushing on LLC machines.
3017                  */
3018                 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
3019
3020         /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
3021          * write would work. */
3022         I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
3023         I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
3024 }
3025
3026 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
3027 {
3028         uint64_t pat;
3029
3030         /*
3031          * Map WB on BDW to snooped on CHV.
3032          *
3033          * Only the snoop bit has meaning for CHV, the rest is
3034          * ignored.
3035          *
3036          * The hardware will never snoop for certain types of accesses:
3037          * - CPU GTT (GMADR->GGTT->no snoop->memory)
3038          * - PPGTT page tables
3039          * - some other special cycles
3040          *
3041          * As with BDW, we also need to consider the following for GT accesses:
3042          * "For GGTT, there is NO pat_sel[2:0] from the entry,
3043          * so RTL will always use the value corresponding to
3044          * pat_sel = 000".
3045          * Which means we must set the snoop bit in PAT entry 0
3046          * in order to keep the global status page working.
3047          */
3048         pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
3049               GEN8_PPAT(1, 0) |
3050               GEN8_PPAT(2, 0) |
3051               GEN8_PPAT(3, 0) |
3052               GEN8_PPAT(4, CHV_PPAT_SNOOP) |
3053               GEN8_PPAT(5, CHV_PPAT_SNOOP) |
3054               GEN8_PPAT(6, CHV_PPAT_SNOOP) |
3055               GEN8_PPAT(7, CHV_PPAT_SNOOP);
3056
3057         I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
3058         I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
3059 }
3060
3061 static int gen8_gmch_probe(struct drm_device *dev,
3062                            u64 *gtt_total,
3063                            size_t *stolen,
3064                            phys_addr_t *mappable_base,
3065                            u64 *mappable_end)
3066 {
3067         struct drm_i915_private *dev_priv = dev->dev_private;
3068         u64 gtt_size;
3069         u16 snb_gmch_ctl;
3070         int ret;
3071
3072         /* TODO: We're not aware of mappable constraints on gen8 yet */
3073         *mappable_base = pci_resource_start(dev->pdev, 2);
3074         *mappable_end = pci_resource_len(dev->pdev, 2);
3075
3076 #if 0
3077         if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
3078                 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
3079 #endif
3080
3081         pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3082
3083         if (INTEL_INFO(dev)->gen >= 9) {
3084                 *stolen = gen9_get_stolen_size(snb_gmch_ctl);
3085                 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
3086         } else if (IS_CHERRYVIEW(dev)) {
3087                 *stolen = chv_get_stolen_size(snb_gmch_ctl);
3088                 gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
3089         } else {
3090                 *stolen = gen8_get_stolen_size(snb_gmch_ctl);
3091                 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
3092         }
3093
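        /*
         * Worked example (illustrative): an 8MB GTT holds 8MB / 8 bytes per
         * PTE = 1M entries, assuming the usual 8-byte gen8 PTE
         * (sizeof(gen8_pte_t)), i.e. a 4GB global GTT after the PAGE_SHIFT
         * scaling below.
         */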
3094         *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
3095
3096         if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
3097                 chv_setup_private_ppat(dev_priv);
3098         else
3099                 bdw_setup_private_ppat(dev_priv);
3100
3101         ret = ggtt_probe_common(dev, gtt_size);
3102
3103         dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
3104         dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
3105         dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
3106         dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
3107
3108         if (IS_CHERRYVIEW(dev_priv))
3109                 dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
3110
3111         return ret;
3112 }
3113
3114 static int gen6_gmch_probe(struct drm_device *dev,
3115                            u64 *gtt_total,
3116                            size_t *stolen,
3117                            phys_addr_t *mappable_base,
3118                            u64 *mappable_end)
3119 {
3120         struct drm_i915_private *dev_priv = dev->dev_private;
3121         unsigned int gtt_size;
3122         u16 snb_gmch_ctl;
3123         int ret;
3124
3125         *mappable_base = pci_resource_start(dev->pdev, 2);
3126         *mappable_end = pci_resource_len(dev->pdev, 2);
3127
3128         /* 64/512MB is the current min/max we actually know of, but this is just
3129          * a coarse sanity check.
3130          */
3131         if (*mappable_end < (64 << 20) || *mappable_end > (512 << 20)) {
3132                 DRM_ERROR("Unknown GMADR size (%llx)\n", dev_priv->gtt.mappable_end);
3133                 return -ENXIO;
3134         }
3135
3136 #if 0
3137         if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
3138                 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
3139 #endif
3140         pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3141
3142         *stolen = gen6_get_stolen_size(snb_gmch_ctl);
3143
3144         gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
3145         *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
3146
3147         ret = ggtt_probe_common(dev, gtt_size);
3148
3149         dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
3150         dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
3151         dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
3152         dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
3153
3154         return ret;
3155 }
3156
3157 static void gen6_gmch_remove(struct i915_address_space *vm)
3158 {
3159
3160         struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
3161
3162         iounmap(gtt->gsm);
3163         free_scratch_page(vm->dev, vm->scratch_page);
3164 }
3165
3166 static int i915_gmch_probe(struct drm_device *dev,
3167                            u64 *gtt_total,
3168                            size_t *stolen,
3169                            phys_addr_t *mappable_base,
3170                            u64 *mappable_end)
3171 {
3172         struct drm_i915_private *dev_priv = dev->dev_private;
3173 #if 0
3174         int ret;
3175
3176         ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
3177         if (!ret) {
3178                 DRM_ERROR("failed to set up gmch\n");
3179                 return -EIO;
3180         }
3181 #endif
3182
3183         intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
3184
3185         dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
3186         dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
3187         dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
3188         dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
3189         dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
3190
3191         if (unlikely(dev_priv->gtt.do_idle_maps))
3192                 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3193
3194         return 0;
3195 }
3196
3197 static void i915_gmch_remove(struct i915_address_space *vm)
3198 {
3199         intel_gmch_remove();
3200 }
3201
3202 int i915_gem_gtt_init(struct drm_device *dev)
3203 {
3204         struct drm_i915_private *dev_priv = dev->dev_private;
3205         struct i915_gtt *gtt = &dev_priv->gtt;
3206         int ret;
3207
3208         if (INTEL_INFO(dev)->gen <= 5) {
3209                 gtt->gtt_probe = i915_gmch_probe;
3210                 gtt->base.cleanup = i915_gmch_remove;
3211         } else if (INTEL_INFO(dev)->gen < 8) {
3212                 gtt->gtt_probe = gen6_gmch_probe;
3213                 gtt->base.cleanup = gen6_gmch_remove;
3214                 if (IS_HASWELL(dev) && dev_priv->ellc_size)
3215                         gtt->base.pte_encode = iris_pte_encode;
3216                 else if (IS_HASWELL(dev))
3217                         gtt->base.pte_encode = hsw_pte_encode;
3218                 else if (IS_VALLEYVIEW(dev))
3219                         gtt->base.pte_encode = byt_pte_encode;
3220                 else if (INTEL_INFO(dev)->gen >= 7)
3221                         gtt->base.pte_encode = ivb_pte_encode;
3222                 else
3223                         gtt->base.pte_encode = snb_pte_encode;
3224         } else {
3225                 dev_priv->gtt.gtt_probe = gen8_gmch_probe;
3226                 dev_priv->gtt.base.cleanup = gen6_gmch_remove;
3227         }
3228
3229         gtt->base.dev = dev;
3230         gtt->base.is_ggtt = true;
3231
3232         ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
3233                              &gtt->mappable_base, &gtt->mappable_end);
3234         if (ret)
3235                 return ret;
3236
3237         /*
3238          * Initialise stolen early so that we may reserve preallocated
3239          * objects for the BIOS to KMS transition.
3240          */
3241         ret = i915_gem_init_stolen(dev);
3242         if (ret)
3243                 goto out_gtt_cleanup;
3244
3245         /* GMADR is the PCI mmio aperture into the global GTT. */
3246         DRM_INFO("Memory usable by graphics device = %lluM\n",
3247                  gtt->base.total >> 20);
3248         DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
3249         DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
3250 #ifdef CONFIG_INTEL_IOMMU
3251         if (intel_iommu_gfx_mapped)
3252                 DRM_INFO("VT-d active for gfx access\n");
3253 #endif
3254         /*
3255          * i915.enable_ppgtt is read-only, so do an early pass to validate the
3256          * user's requested state against the hardware/driver capabilities.  We
3257          * do this now so that we can print out any log messages once rather
3258          * than every time we check intel_enable_ppgtt().
3259          */
3260         i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
3261         DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
3262
3263         return 0;
3264
3265 out_gtt_cleanup:
3266         gtt->base.cleanup(&dev_priv->gtt.base);
3267
3268         return ret;
3269 }
3270
3271 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3272 {
3273         struct drm_i915_private *dev_priv = dev->dev_private;
3274         struct drm_i915_gem_object *obj;
3275         struct i915_address_space *vm;
3276         struct i915_vma *vma;
3277         bool flush;
3278
3279         i915_check_and_clear_faults(dev);
3280
3281         /* First fill our portion of the GTT with scratch pages */
3282         dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
3283                                        dev_priv->gtt.base.start,
3284                                        dev_priv->gtt.base.total,
3285                                        true);
3286
3287         /* Flush the cache for objects bound into the GGTT and rebind them. */
3288         vm = &dev_priv->gtt.base;
3289         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
3290                 flush = false;
3291                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3292                         if (vma->vm != vm)
3293                                 continue;
3294
3295                         WARN_ON(i915_vma_bind(vma, obj->cache_level,
3296                                               PIN_UPDATE));
3297
3298                         flush = true;
3299                 }
3300
3301                 if (flush)
3302                         i915_gem_clflush_object(obj, obj->pin_display);
3303         }
3304
3305         if (INTEL_INFO(dev)->gen >= 8) {
3306                 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
3307                         chv_setup_private_ppat(dev_priv);
3308                 else
3309                         bdw_setup_private_ppat(dev_priv);
3310
3311                 return;
3312         }
3313
3314         if (USES_PPGTT(dev)) {
3315                 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3316                         /* TODO: Perhaps it shouldn't be gen6 specific */
3317
3318                         struct i915_hw_ppgtt *ppgtt =
3319                                         container_of(vm, struct i915_hw_ppgtt,
3320                                                      base);
3321
3322                         if (i915_is_ggtt(vm))
3323                                 ppgtt = dev_priv->mm.aliasing_ppgtt;
3324
3325                         gen6_write_page_range(dev_priv, &ppgtt->pd,
3326                                               0, ppgtt->base.total);
3327                 }
3328         }
3329
3330         i915_ggtt_flush(dev_priv);
3331 }
3332
3333 static struct i915_vma *
3334 __i915_gem_vma_create(struct drm_i915_gem_object *obj,
3335                       struct i915_address_space *vm,
3336                       const struct i915_ggtt_view *ggtt_view)
3337 {
3338         struct i915_vma *vma;
3339
3340         if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3341                 return ERR_PTR(-EINVAL);
3342
3343         vma = kzalloc(sizeof(*vma), GFP_KERNEL);
3344         if (vma == NULL)
3345                 return ERR_PTR(-ENOMEM);
3346
3347         INIT_LIST_HEAD(&vma->vm_link);
3348         INIT_LIST_HEAD(&vma->obj_link);
3349         INIT_LIST_HEAD(&vma->exec_list);
3350         vma->vm = vm;
3351         vma->obj = obj;
3352         vma->is_ggtt = i915_is_ggtt(vm);
3353
3354         if (i915_is_ggtt(vm))
3355                 vma->ggtt_view = *ggtt_view;
3356         else
3357                 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
3358
3359         list_add_tail(&vma->obj_link, &obj->vma_list);
3360
3361         return vma;
3362 }
3363
3364 struct i915_vma *
3365 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
3366                                   struct i915_address_space *vm)
3367 {
3368         struct i915_vma *vma;
3369
3370         vma = i915_gem_obj_to_vma(obj, vm);
3371         if (!vma)
3372                 vma = __i915_gem_vma_create(obj, vm,
3373                                             i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
3374
3375         return vma;
3376 }
3377
3378 struct i915_vma *
3379 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
3380                                        const struct i915_ggtt_view *view)
3381 {
3382         struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
3383         struct i915_vma *vma;
3384
3385         if (WARN_ON(!view))
3386                 return ERR_PTR(-EINVAL);
3387
3388         vma = i915_gem_obj_to_ggtt_view(obj, view);
3389
3390         if (IS_ERR(vma))
3391                 return vma;
3392
3393         if (!vma)
3394                 vma = __i915_gem_vma_create(obj, ggtt, view);
3395
3396         return vma;
3397
3398 }
3399
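/*
 * Worked example (illustrative): for an object of 2x2 pages numbered 0-3 in
 * linear order, rotate_pages(width = 2, height = 2, stride = 2) walks each
 * column bottom-up and emits the DMA addresses in the order 2, 0, 3, 1.
 */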
3400 static struct scatterlist *
3401 rotate_pages(const dma_addr_t *in, unsigned int offset,
3402              unsigned int width, unsigned int height,
3403              unsigned int stride,
3404              struct sg_table *st, struct scatterlist *sg)
3405 {
3406         unsigned int column, row;
3407         unsigned int src_idx;
3408
3409         if (!sg) {
3410                 st->nents = 0;
3411                 sg = st->sgl;
3412         }
3413
3414         for (column = 0; column < width; column++) {
3415                 src_idx = stride * (height - 1) + column;
3416                 for (row = 0; row < height; row++) {
3417                         st->nents++;
3418                         /* We don't need the pages, but we do need to initialize
3419                          * the entries so the sg list can be happily traversed.
3420                          * All we need are the DMA addresses.
3421                          */
3422                         sg_set_page(sg, NULL, PAGE_SIZE, 0);
3423                         sg_dma_address(sg) = in[offset + src_idx];
3424                         sg_dma_len(sg) = PAGE_SIZE;
3425                         sg = sg_next(sg);
3426                         src_idx -= stride;
3427                 }
3428         }
3429
3430         return sg;
3431 }
3432
3433 static struct sg_table *
3434 intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3435                           struct drm_i915_gem_object *obj)
3436 {
3437         unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
3438         unsigned int size_pages_uv;
3439         struct sg_page_iter sg_iter;
3440         unsigned long i;
3441         dma_addr_t *page_addr_list;
3442         struct sg_table *st;
3443         unsigned int uv_start_page;
3444         struct scatterlist *sg;
3445         int ret = -ENOMEM;
3446
3447         /* Allocate a temporary list of source pages for random access. */
3448         page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
3449                                        sizeof(dma_addr_t));
3450         if (!page_addr_list)
3451                 return ERR_PTR(ret);
3452
3453         /* Account for UV plane with NV12. */
3454         if (rot_info->pixel_format == DRM_FORMAT_NV12)
3455                 size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
3456         else
3457                 size_pages_uv = 0;
3458
3459         /* Allocate target SG list. */
3460         st = kmalloc(sizeof(*st), M_DRM, M_WAITOK);
3461         if (!st)
3462                 goto err_st_alloc;
3463
3464         ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
3465         if (ret)
3466                 goto err_sg_alloc;
3467
3468         /* Populate source page list from the object. */
3469         i = 0;
3470         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
3471                 page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
3472                 i++;
3473         }
3474
3475         /* Rotate the pages. */
3476         sg = rotate_pages(page_addr_list, 0,
3477                      rot_info->width_pages, rot_info->height_pages,
3478                      rot_info->width_pages,
3479                      st, NULL);
3480
3481         /* Append the UV plane if NV12. */
3482         if (rot_info->pixel_format == DRM_FORMAT_NV12) {
3483                 uv_start_page = size_pages;
3484
3485                 /* Check for tile-row misalignment. */
3486                 if (offset_in_page(rot_info->uv_offset))
3487                         uv_start_page--;
3488
3489                 rot_info->uv_start_page = uv_start_page;
3490
3491                 rotate_pages(page_addr_list, uv_start_page,
3492                              rot_info->width_pages_uv,
3493                              rot_info->height_pages_uv,
3494                              rot_info->width_pages_uv,
3495                              st, sg);
3496         }
3497
3498         DRM_DEBUG_KMS(
3499                       "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
3500                       obj->base.size, rot_info->pitch, rot_info->height,
3501                       rot_info->pixel_format, rot_info->width_pages,
3502                       rot_info->height_pages, size_pages + size_pages_uv,
3503                       size_pages);
3504
3505         drm_free_large(page_addr_list);
3506
3507         return st;
3508
3509 err_sg_alloc:
3510         kfree(st);
3511 err_st_alloc:
3512         drm_free_large(page_addr_list);
3513
3514         DRM_DEBUG_KMS(
3515                       "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
3516                       obj->base.size, ret, rot_info->pitch, rot_info->height,
3517                       rot_info->pixel_format, rot_info->width_pages,
3518                       rot_info->height_pages, size_pages + size_pages_uv,
3519                       size_pages);
3520         return ERR_PTR(ret);
3521 }
3522
3523 static struct sg_table *
3524 intel_partial_pages(const struct i915_ggtt_view *view,
3525                     struct drm_i915_gem_object *obj)
3526 {
3527         struct sg_table *st;
3528         struct scatterlist *sg;
3529         struct sg_page_iter obj_sg_iter;
3530         int ret = -ENOMEM;
3531
3532         st = kmalloc(sizeof(*st), M_DRM, M_WAITOK);
3533         if (!st)
3534                 goto err_st_alloc;
3535
3536         ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
3537         if (ret)
3538                 goto err_sg_alloc;
3539
3540         sg = st->sgl;
3541         st->nents = 0;
3542         for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
3543                 view->params.partial.offset)
3544         {
3545                 if (st->nents >= view->params.partial.size)
3546                         break;
3547
3548                 sg_set_page(sg, NULL, PAGE_SIZE, 0);
3549                 sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
3550                 sg_dma_len(sg) = PAGE_SIZE;
3551
3552                 sg = sg_next(sg);
3553                 st->nents++;
3554         }
3555
3556         return st;
3557
3558 err_sg_alloc:
3559         kfree(st);
3560 err_st_alloc:
3561         return ERR_PTR(ret);
3562 }
3563
3564 static int
3565 i915_get_ggtt_vma_pages(struct i915_vma *vma)
3566 {
3567         int ret = 0;
3568
3569         if (vma->ggtt_view.pages)
3570                 return 0;
3571
3572         if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
3573                 vma->ggtt_view.pages = vma->obj->pages;
3574         else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
3575                 vma->ggtt_view.pages =
3576                         intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
3577         else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
3578                 vma->ggtt_view.pages =
3579                         intel_partial_pages(&vma->ggtt_view, vma->obj);
3580         else
3581                 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3582                           vma->ggtt_view.type);
3583
3584         if (!vma->ggtt_view.pages) {
3585                 DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
3586                           vma->ggtt_view.type);
3587                 ret = -EINVAL;
3588         } else if (IS_ERR(vma->ggtt_view.pages)) {
3589                 ret = PTR_ERR(vma->ggtt_view.pages);
3590                 vma->ggtt_view.pages = NULL;
3591                 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3592                           vma->ggtt_view.type, ret);
3593         }
3594
3595         return ret;
3596 }
3597
3598 /**
3599  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
3600  * @vma: VMA to map
3601  * @cache_level: mapping cache level
3602  * @flags: flags like global or local mapping
3603  *
3604  * DMA addresses are taken from the scatter-gather table of this object (or of
3605  * this VMA in the case of non-default GGTT views) and the PTE entries are set up.
3606  * Note that DMA addresses are also the only part of the SG table we care about.
3607  */
3608 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
3609                   u32 flags)
3610 {
3611         int ret;
3612         u32 bind_flags;
3613
3614         if (WARN_ON(flags == 0))
3615                 return -EINVAL;
3616
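        /*
         * Sketch of the flag handling below (illustrative): PIN_GLOBAL maps
         * to GLOBAL_BIND and PIN_USER to LOCAL_BIND.  Without PIN_UPDATE,
         * bits already present in vma->bound are masked out, so rebinding an
         * already globally bound VMA with only PIN_GLOBAL ends up with
         * bind_flags == 0 and returns early without touching the PTEs.
         */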
3617         bind_flags = 0;
3618         if (flags & PIN_GLOBAL)
3619                 bind_flags |= GLOBAL_BIND;
3620         if (flags & PIN_USER)
3621                 bind_flags |= LOCAL_BIND;
3622
3623         if (flags & PIN_UPDATE)
3624                 bind_flags |= vma->bound;
3625         else
3626                 bind_flags &= ~vma->bound;
3627
3628         if (bind_flags == 0)
3629                 return 0;
3630
3631         if (vma->bound == 0 && vma->vm->allocate_va_range) {
3632                 /* XXX: i915_vma_pin() will fix this +- hack */
3633                 vma->pin_count++;
3634                 trace_i915_va_alloc(vma);
3635                 ret = vma->vm->allocate_va_range(vma->vm,
3636                                                  vma->node.start,
3637                                                  vma->node.size);
3638                 vma->pin_count--;
3639                 if (ret)
3640                         return ret;
3641         }
3642
3643         ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
3644         if (ret)
3645                 return ret;
3646
3647         vma->bound |= bind_flags;
3648
3649         return 0;
3650 }
3651
3652 /**
3653  * i915_ggtt_view_size - Get the size of a GGTT view.
3654  * @obj: Object the view is of.
3655  * @view: The view in question.
3656  *
3657  * @return The size of the GGTT view in bytes.
3658  */
3659 size_t
3660 i915_ggtt_view_size(struct drm_i915_gem_object *obj,
3661                     const struct i915_ggtt_view *view)
3662 {
3663         if (view->type == I915_GGTT_VIEW_NORMAL) {
3664                 return obj->base.size;
3665         } else if (view->type == I915_GGTT_VIEW_ROTATED) {
3666                 return view->params.rotated.size;
3667         } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
3668                 return view->params.partial.size << PAGE_SHIFT;
3669         } else {
3670                 WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
3671                 return obj->base.size;
3672         }
3673 }