1 /*
2  * Copyright © 2010 Daniel Vetter
3  * Copyright © 2011-2014 Intel Corporation
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  */
25
26 #include <linux/seq_file.h>
27 #include <drm/drmP.h>
28 #include <drm/i915_drm.h>
29 #include "i915_drv.h"
30 #include "i915_vgpu.h"
31 #include "i915_trace.h"
32 #include "intel_drv.h"
33
34 #include <linux/bitmap.h>
35
36 #include <sys/mplock2.h>
37
38 /**
39  * DOC: Global GTT views
40  *
41  * Background and previous state
42  *
43  * Historically objects could exist (be bound) in global GTT space only as
44  * singular instances with a view representing all of the object's backing pages
45  * in a linear fashion. This view is called a normal view.
46  *
47  * To support multiple views of the same object, where the number of mapped
48  * pages is not equal to the backing store, or where the layout of the pages
49  * is not linear, the concept of a GGTT view was added.
50  *
51  * One example of an alternative view is a stereo display driven by a single
52  * image. In this case we would have a framebuffer looking like this
53  * (2x2 pages):
54  *
55  *    12
56  *    34
57  *
58  * Above would represent a normal GGTT view as normally mapped for GPU or CPU
59  * rendering. In contrast, fed to the display engine would be an alternative
60  * view which could look something like this:
61  *
62  *   1212
63  *   3434
64  *
65  * In this example both the size and layout of pages in the alternative view are
66  * different from the normal view.
67  *
68  * Implementation and usage
69  *
70  * GGTT views are implemented using VMAs and are distinguished via enum
71  * i915_ggtt_view_type and struct i915_ggtt_view.
72  *
73  * A new flavour of core GEM functions which work with GGTT bound objects was
74  * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
75  * renaming in large amounts of code. They take the struct i915_ggtt_view
76  * parameter encapsulating all metadata required to implement a view.
77  *
78  * As a helper for callers which are only interested in the normal view,
79  * a globally const i915_ggtt_view_normal singleton instance exists. All old core
80  * GEM API functions, the ones not taking the view parameter, operate on,
81  * or with, the normal GGTT view.
82  *
83  * Code wanting to add or use a new GGTT view needs to:
84  *
85  * 1. Add a new enum with a suitable name.
86  * 2. Extend the metadata in the i915_ggtt_view structure if required.
87  * 3. Add support to i915_get_vma_pages().
88  *
89  * New views are required to build a scatter-gather table from within the
90  * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
91  * exists for the lifetime of a VMA.
92  *
93  * The core API is designed to have copy semantics, which means that the passed-in
94  * struct i915_ggtt_view does not need to be persistent (left around after
95  * calling the core API functions).
96  *
97  */
98
99 static int
100 i915_get_ggtt_vma_pages(struct i915_vma *vma);
101
102 const struct i915_ggtt_view i915_ggtt_view_normal;
103 const struct i915_ggtt_view i915_ggtt_view_rotated = {
104         .type = I915_GGTT_VIEW_ROTATED
105 };
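/*
 * Illustrative sketch only, not driver code: a caller wanting the rotated
 * view defined above rather than the normal one passes a view descriptor
 * when pinning.  The pin helper name and signature here are assumed from
 * the codebase of this era and may differ:
 *
 *	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_ROTATED };
 *	int ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *
 * Because the core API has copy semantics (see the DOC comment above), the
 * view struct can safely live on the caller's stack.
 */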
106
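/*
 * sanitize_enable_ppgtt() maps the i915.enable_ppgtt module parameter onto
 * what the hardware and current mode of operation support.  Return value:
 * 0 = no PPGTT (GGTT only), 1 = aliasing PPGTT, 2 = full PPGTT (32b),
 * 3 = full 48-bit PPGTT.
 */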
107 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
108 {
109         bool has_aliasing_ppgtt;
110         bool has_full_ppgtt;
111         bool has_full_48bit_ppgtt;
112
113         has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
114         has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
115         has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
116
117         if (intel_vgpu_active(dev))
118                 has_full_ppgtt = false; /* emulation is too hard */
119
120         /*
121          * We don't allow disabling PPGTT for gen9+ as it's a requirement for
122          * execlists, the sole mechanism available to submit work.
123          */
124         if (INTEL_INFO(dev)->gen < 9 &&
125             (enable_ppgtt == 0 || !has_aliasing_ppgtt))
126                 return 0;
127
128         if (enable_ppgtt == 1)
129                 return 1;
130
131         if (enable_ppgtt == 2 && has_full_ppgtt)
132                 return 2;
133
134         if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
135                 return 3;
136
137 #ifdef CONFIG_INTEL_IOMMU
138         /* Disable ppgtt on SNB if VT-d is on. */
139         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
140                 DRM_INFO("Disabling PPGTT because VT-d is on\n");
141                 return 0;
142         }
143 #endif
144
145         /* Early VLV (pre-B3 stepping) doesn't support this */
146         if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
147                 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
148                 return 0;
149         }
150
151         if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
152                 return has_full_48bit_ppgtt ? 3 : 2;
153         else
154                 return has_aliasing_ppgtt ? 1 : 0;
155 }
156
157 static int ppgtt_bind_vma(struct i915_vma *vma,
158                           enum i915_cache_level cache_level,
159                           u32 unused)
160 {
161         u32 pte_flags = 0;
162
163         /* Currently applicable only to VLV */
164         if (vma->obj->gt_ro)
165                 pte_flags |= PTE_READ_ONLY;
166
167         vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
168                                 cache_level, pte_flags);
169
170         return 0;
171 }
172
173 static void ppgtt_unbind_vma(struct i915_vma *vma)
174 {
175         vma->vm->clear_range(vma->vm,
176                              vma->node.start,
177                              vma->obj->base.size,
178                              true);
179 }
180
181 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
182                                   enum i915_cache_level level,
183                                   bool valid)
184 {
185         gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
186         pte |= addr;
187
188         switch (level) {
189         case I915_CACHE_NONE:
190                 pte |= PPAT_UNCACHED_INDEX;
191                 break;
192         case I915_CACHE_WT:
193                 pte |= PPAT_DISPLAY_ELLC_INDEX;
194                 break;
195         default:
196                 pte |= PPAT_CACHED_INDEX;
197                 break;
198         }
199
200         return pte;
201 }
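/*
 * Worked example for gen8_pte_encode() above, using a hypothetical dma
 * address: a valid LLC-cached mapping of the page at 0x1a2b3000 encodes as
 *
 *	pte = 0x1a2b3000 | _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED_INDEX
 *
 * i.e. the page address with the valid/writable bits set and the PPAT index
 * selecting the cacheable entry.
 */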
202
203 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
204                                   const enum i915_cache_level level)
205 {
206         gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
207         pde |= addr;
208         if (level != I915_CACHE_NONE)
209                 pde |= PPAT_CACHED_PDE_INDEX;
210         else
211                 pde |= PPAT_UNCACHED_INDEX;
212         return pde;
213 }
214
215 #define gen8_pdpe_encode gen8_pde_encode
216 #define gen8_pml4e_encode gen8_pde_encode
217
218 static gen6_pte_t snb_pte_encode(dma_addr_t addr,
219                                  enum i915_cache_level level,
220                                  bool valid, u32 unused)
221 {
222         gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
223         pte |= GEN6_PTE_ADDR_ENCODE(addr);
224
225         switch (level) {
226         case I915_CACHE_L3_LLC:
227         case I915_CACHE_LLC:
228                 pte |= GEN6_PTE_CACHE_LLC;
229                 break;
230         case I915_CACHE_NONE:
231                 pte |= GEN6_PTE_UNCACHED;
232                 break;
233         default:
234                 MISSING_CASE(level);
235         }
236
237         return pte;
238 }
239
240 static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
241                                  enum i915_cache_level level,
242                                  bool valid, u32 unused)
243 {
244         gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
245         pte |= GEN6_PTE_ADDR_ENCODE(addr);
246
247         switch (level) {
248         case I915_CACHE_L3_LLC:
249                 pte |= GEN7_PTE_CACHE_L3_LLC;
250                 break;
251         case I915_CACHE_LLC:
252                 pte |= GEN6_PTE_CACHE_LLC;
253                 break;
254         case I915_CACHE_NONE:
255                 pte |= GEN6_PTE_UNCACHED;
256                 break;
257         default:
258                 MISSING_CASE(level);
259         }
260
261         return pte;
262 }
263
264 static gen6_pte_t byt_pte_encode(dma_addr_t addr,
265                                  enum i915_cache_level level,
266                                  bool valid, u32 flags)
267 {
268         gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
269         pte |= GEN6_PTE_ADDR_ENCODE(addr);
270
271         if (!(flags & PTE_READ_ONLY))
272                 pte |= BYT_PTE_WRITEABLE;
273
274         if (level != I915_CACHE_NONE)
275                 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
276
277         return pte;
278 }
279
280 static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
281                                  enum i915_cache_level level,
282                                  bool valid, u32 unused)
283 {
284         gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
285         pte |= HSW_PTE_ADDR_ENCODE(addr);
286
287         if (level != I915_CACHE_NONE)
288                 pte |= HSW_WB_LLC_AGE3;
289
290         return pte;
291 }
292
293 static gen6_pte_t iris_pte_encode(dma_addr_t addr,
294                                   enum i915_cache_level level,
295                                   bool valid, u32 unused)
296 {
297         gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
298         pte |= HSW_PTE_ADDR_ENCODE(addr);
299
300         switch (level) {
301         case I915_CACHE_NONE:
302                 break;
303         case I915_CACHE_WT:
304                 pte |= HSW_WT_ELLC_LLC_AGE3;
305                 break;
306         default:
307                 pte |= HSW_WB_ELLC_LLC_AGE3;
308                 break;
309         }
310
311         return pte;
312 }
313
314 static int __setup_page_dma(struct drm_device *dev,
315                             struct i915_page_dma *p, gfp_t flags)
316 {
317         struct device *device = &dev->pdev->dev;
318
319         p->page = alloc_page(flags);
320         if (!p->page)
321                 return -ENOMEM;
322
323         p->daddr = dma_map_page(device,
324                                 p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
325
326         if (dma_mapping_error(device, p->daddr)) {
327                 __free_page(p->page);
328                 return -EINVAL;
329         }
330
331         return 0;
332 }
333
334 static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
335 {
336         return __setup_page_dma(dev, p, GFP_KERNEL);
337 }
338
339 static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
340 {
341         if (WARN_ON(!p->page))
342                 return;
343
344         dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
345         __free_page(p->page);
346         memset(p, 0, sizeof(*p));
347 }
348
349 static void *kmap_page_dma(struct i915_page_dma *p)
350 {
351         return kmap_atomic(p->page);
352 }
353
354 /* We use the flushing unmap only with ppgtt structures:
355  * page directories, page tables and scratch pages.
356  */
357 static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
358 {
359         /* There are only a few exceptions for gen >= 6: chv and bxt.
360          * And we are not sure about the latter, so play safe for now.
361          */
362         if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
363                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
364
365         kunmap_atomic(vaddr);
366 }
367
368 #define kmap_px(px) kmap_page_dma(px_base(px))
369 #define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
370
371 #define setup_px(dev, px) setup_page_dma((dev), px_base(px))
372 #define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
373 #define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
374 #define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
375
376 static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
377                           const uint64_t val)
378 {
379         int i;
380         uint64_t * const vaddr = kmap_page_dma(p);
381
382         for (i = 0; i < 512; i++)
383                 vaddr[i] = val;
384
385         kunmap_page_dma(dev, vaddr);
386 }
387
388 static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
389                              const uint32_t val32)
390 {
391         uint64_t v = val32;
392
393         v = v << 32 | val32;
394
395         fill_page_dma(dev, p, v);
396 }
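/*
 * Example of the widening done by fill_page_dma_32() above: val32 =
 * 0xdeadbeef becomes v = 0xdeadbeefdeadbeef, so each 64-bit slot written by
 * fill_page_dma() carries the 32-bit PTE value in both halves.
 */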
397
398 static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
399 {
400         struct i915_page_scratch *sp;
401         int ret;
402
403         sp = kzalloc(sizeof(*sp), GFP_KERNEL);
404         if (sp == NULL)
405                 return ERR_PTR(-ENOMEM);
406
407         ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
408         if (ret) {
409                 kfree(sp);
410                 return ERR_PTR(ret);
411         }
412
413         set_pages_uc(px_page(sp), 1);
414
415         return sp;
416 }
417
418 static void free_scratch_page(struct drm_device *dev,
419                               struct i915_page_scratch *sp)
420 {
421         set_pages_wb(px_page(sp), 1);
422
423         cleanup_px(dev, sp);
424         kfree(sp);
425 }
426
427 static struct i915_page_table *alloc_pt(struct drm_device *dev)
428 {
429         struct i915_page_table *pt;
430         const size_t count = INTEL_INFO(dev)->gen >= 8 ?
431                 GEN8_PTES : GEN6_PTES;
432         int ret = -ENOMEM;
433
434         pt = kzalloc(sizeof(*pt), GFP_KERNEL);
435         if (!pt)
436                 return ERR_PTR(-ENOMEM);
437
438         pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
439                                 GFP_KERNEL);
440
441         if (!pt->used_ptes)
442                 goto fail_bitmap;
443
444         ret = setup_px(dev, pt);
445         if (ret)
446                 goto fail_page_m;
447
448         return pt;
449
450 fail_page_m:
451         kfree(pt->used_ptes);
452 fail_bitmap:
453         kfree(pt);
454
455         return ERR_PTR(ret);
456 }
457
458 static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
459 {
460         cleanup_px(dev, pt);
461         kfree(pt->used_ptes);
462         kfree(pt);
463 }
464
465 static void gen8_initialize_pt(struct i915_address_space *vm,
466                                struct i915_page_table *pt)
467 {
468         gen8_pte_t scratch_pte;
469
470         scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
471                                       I915_CACHE_LLC, true);
472
473         fill_px(vm->dev, pt, scratch_pte);
474 }
475
476 static void gen6_initialize_pt(struct i915_address_space *vm,
477                                struct i915_page_table *pt)
478 {
479         gen6_pte_t scratch_pte;
480
481         WARN_ON(px_dma(vm->scratch_page) == 0);
482
483         scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
484                                      I915_CACHE_LLC, true, 0);
485
486         fill32_px(vm->dev, pt, scratch_pte);
487 }
488
489 static struct i915_page_directory *alloc_pd(struct drm_device *dev)
490 {
491         struct i915_page_directory *pd;
492         int ret = -ENOMEM;
493
494         pd = kzalloc(sizeof(*pd), GFP_KERNEL);
495         if (!pd)
496                 return ERR_PTR(-ENOMEM);
497
498         pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
499                                 sizeof(*pd->used_pdes), GFP_KERNEL);
500         if (!pd->used_pdes)
501                 goto fail_bitmap;
502
503         ret = setup_px(dev, pd);
504         if (ret)
505                 goto fail_page_m;
506
507         return pd;
508
509 fail_page_m:
510         kfree(pd->used_pdes);
511 fail_bitmap:
512         kfree(pd);
513
514         return ERR_PTR(ret);
515 }
516
517 static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
518 {
519         if (px_page(pd)) {
520                 cleanup_px(dev, pd);
521                 kfree(pd->used_pdes);
522                 kfree(pd);
523         }
524 }
525
526 static void gen8_initialize_pd(struct i915_address_space *vm,
527                                struct i915_page_directory *pd)
528 {
529         gen8_pde_t scratch_pde;
530
531         scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
532
533         fill_px(vm->dev, pd, scratch_pde);
534 }
535
536 static int __pdp_init(struct drm_device *dev,
537                       struct i915_page_directory_pointer *pdp)
538 {
539         size_t pdpes = I915_PDPES_PER_PDP(dev);
540
541         pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
542                                   sizeof(unsigned long),
543                                   GFP_KERNEL);
544         if (!pdp->used_pdpes)
545                 return -ENOMEM;
546
547         pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
548                                       GFP_KERNEL);
549         if (!pdp->page_directory) {
550                 kfree(pdp->used_pdpes);
551                 /* the PDP might be the statically allocated top level. Keep it
552                  * as clean as possible */
553                 pdp->used_pdpes = NULL;
554                 return -ENOMEM;
555         }
556
557         return 0;
558 }
559
560 static void __pdp_fini(struct i915_page_directory_pointer *pdp)
561 {
562         kfree(pdp->used_pdpes);
563         kfree(pdp->page_directory);
564         pdp->page_directory = NULL;
565 }
566
567 static struct
568 i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
569 {
570         struct i915_page_directory_pointer *pdp;
571         int ret = -ENOMEM;
572
573         WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
574
575         pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
576         if (!pdp)
577                 return ERR_PTR(-ENOMEM);
578
579         ret = __pdp_init(dev, pdp);
580         if (ret)
581                 goto fail_bitmap;
582
583         ret = setup_px(dev, pdp);
584         if (ret)
585                 goto fail_page_m;
586
587         return pdp;
588
589 fail_page_m:
590         __pdp_fini(pdp);
591 fail_bitmap:
592         kfree(pdp);
593
594         return ERR_PTR(ret);
595 }
596
597 static void free_pdp(struct drm_device *dev,
598                      struct i915_page_directory_pointer *pdp)
599 {
600         __pdp_fini(pdp);
601         if (USES_FULL_48BIT_PPGTT(dev)) {
602                 cleanup_px(dev, pdp);
603                 kfree(pdp);
604         }
605 }
606
607 static void gen8_initialize_pdp(struct i915_address_space *vm,
608                                 struct i915_page_directory_pointer *pdp)
609 {
610         gen8_ppgtt_pdpe_t scratch_pdpe;
611
612         scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
613
614         fill_px(vm->dev, pdp, scratch_pdpe);
615 }
616
617 static void gen8_initialize_pml4(struct i915_address_space *vm,
618                                  struct i915_pml4 *pml4)
619 {
620         gen8_ppgtt_pml4e_t scratch_pml4e;
621
622         scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
623                                           I915_CACHE_LLC);
624
625         fill_px(vm->dev, pml4, scratch_pml4e);
626 }
627
628 static void
629 gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
630                           struct i915_page_directory_pointer *pdp,
631                           struct i915_page_directory *pd,
632                           int index)
633 {
634         gen8_ppgtt_pdpe_t *page_directorypo;
635
636         if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
637                 return;
638
639         page_directorypo = kmap_px(pdp);
640         page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
641         kunmap_px(ppgtt, page_directorypo);
642 }
643
644 static void
645 gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
646                                   struct i915_pml4 *pml4,
647                                   struct i915_page_directory_pointer *pdp,
648                                   int index)
649 {
650         gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
651
652         WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
653         pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
654         kunmap_px(ppgtt, pagemap);
655 }
656
657 /* Broadwell Page Directory Pointer Descriptors */
658 static int gen8_write_pdp(struct drm_i915_gem_request *req,
659                           unsigned entry,
660                           dma_addr_t addr)
661 {
662         struct intel_engine_cs *ring = req->ring;
663         int ret;
664
665         BUG_ON(entry >= 4);
666
667         ret = intel_ring_begin(req, 6);
668         if (ret)
669                 return ret;
670
671         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
672         intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
673         intel_ring_emit(ring, upper_32_bits(addr));
674         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
675         intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
676         intel_ring_emit(ring, lower_32_bits(addr));
677         intel_ring_advance(ring);
678
679         return 0;
680 }
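/*
 * The command stream emitted by gen8_write_pdp() above is two register
 * writes per PDP entry:
 *
 *	MI_LOAD_REGISTER_IMM(1), GEN8_RING_PDP_UDW(ring, entry), upper_32_bits(addr)
 *	MI_LOAD_REGISTER_IMM(1), GEN8_RING_PDP_LDW(ring, entry), lower_32_bits(addr)
 *
 * gen8_legacy_mm_switch() below repeats this for all four legacy PDP
 * entries, highest entry first.
 */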
681
682 static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
683                                  struct drm_i915_gem_request *req)
684 {
685         int i, ret;
686
687         for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
688                 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
689
690                 ret = gen8_write_pdp(req, i, pd_daddr);
691                 if (ret)
692                         return ret;
693         }
694
695         return 0;
696 }
697
698 static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
699                               struct drm_i915_gem_request *req)
700 {
701         return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
702 }
703
704 static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
705                                        struct i915_page_directory_pointer *pdp,
706                                        uint64_t start,
707                                        uint64_t length,
708                                        gen8_pte_t scratch_pte)
709 {
710         struct i915_hw_ppgtt *ppgtt =
711                 container_of(vm, struct i915_hw_ppgtt, base);
712         gen8_pte_t *pt_vaddr;
713         unsigned pdpe = gen8_pdpe_index(start);
714         unsigned pde = gen8_pde_index(start);
715         unsigned pte = gen8_pte_index(start);
716         unsigned num_entries = length >> PAGE_SHIFT;
717         unsigned last_pte, i;
718
719         if (WARN_ON(!pdp))
720                 return;
721
722         while (num_entries) {
723                 struct i915_page_directory *pd;
724                 struct i915_page_table *pt;
725
726                 if (WARN_ON(!pdp->page_directory[pdpe]))
727                         break;
728
729                 pd = pdp->page_directory[pdpe];
730
731                 if (WARN_ON(!pd->page_table[pde]))
732                         break;
733
734                 pt = pd->page_table[pde];
735
736                 if (WARN_ON(!px_page(pt)))
737                         break;
738
739                 last_pte = pte + num_entries;
740                 if (last_pte > GEN8_PTES)
741                         last_pte = GEN8_PTES;
742
743                 pt_vaddr = kmap_px(pt);
744
745                 for (i = pte; i < last_pte; i++) {
746                         pt_vaddr[i] = scratch_pte;
747                         num_entries--;
748                 }
749
750                 kunmap_px(ppgtt, pt);
751
752                 pte = 0;
753                 if (++pde == I915_PDES) {
754                         if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
755                                 break;
756                         pde = 0;
757                 }
758         }
759 }
760
761 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
762                                    uint64_t start,
763                                    uint64_t length,
764                                    bool use_scratch)
765 {
766         struct i915_hw_ppgtt *ppgtt =
767                 container_of(vm, struct i915_hw_ppgtt, base);
768         gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
769                                                  I915_CACHE_LLC, use_scratch);
770
771         if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
772                 gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
773                                            scratch_pte);
774         } else {
775                 uint64_t pml4e;
776                 struct i915_page_directory_pointer *pdp;
777
778                 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
779                         gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
780                                                    scratch_pte);
781                 }
782         }
783 }
784
785 static void
786 gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
787                               struct i915_page_directory_pointer *pdp,
788                               struct sg_page_iter *sg_iter,
789                               uint64_t start,
790                               enum i915_cache_level cache_level)
791 {
792         struct i915_hw_ppgtt *ppgtt =
793                 container_of(vm, struct i915_hw_ppgtt, base);
794         gen8_pte_t *pt_vaddr;
795         unsigned pdpe = gen8_pdpe_index(start);
796         unsigned pde = gen8_pde_index(start);
797         unsigned pte = gen8_pte_index(start);
798
799         pt_vaddr = NULL;
800
801         while (__sg_page_iter_next(sg_iter)) {
802                 if (pt_vaddr == NULL) {
803                         struct i915_page_directory *pd = pdp->page_directory[pdpe];
804                         struct i915_page_table *pt = pd->page_table[pde];
805                         pt_vaddr = kmap_px(pt);
806                 }
807
808                 pt_vaddr[pte] =
809                         gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
810                                         cache_level, true);
811                 if (++pte == GEN8_PTES) {
812                         kunmap_px(ppgtt, pt_vaddr);
813                         pt_vaddr = NULL;
814                         if (++pde == I915_PDES) {
815                                 if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
816                                         break;
817                                 pde = 0;
818                         }
819                         pte = 0;
820                 }
821         }
822
823         if (pt_vaddr)
824                 kunmap_px(ppgtt, pt_vaddr);
825 }
826
827 static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
828                                       struct sg_table *pages,
829                                       uint64_t start,
830                                       enum i915_cache_level cache_level,
831                                       u32 unused)
832 {
833         struct i915_hw_ppgtt *ppgtt =
834                 container_of(vm, struct i915_hw_ppgtt, base);
835         struct sg_page_iter sg_iter;
836
837         __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
838
839         if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
840                 gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
841                                               cache_level);
842         } else {
843                 struct i915_page_directory_pointer *pdp;
844                 uint64_t pml4e;
845                 uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
846
847                 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
848                         gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
849                                                       start, cache_level);
850                 }
851         }
852 }
853
854 static void gen8_free_page_tables(struct drm_device *dev,
855                                   struct i915_page_directory *pd)
856 {
857         int i;
858
859         if (!px_page(pd))
860                 return;
861
862         for_each_set_bit(i, pd->used_pdes, I915_PDES) {
863                 if (WARN_ON(!pd->page_table[i]))
864                         continue;
865
866                 free_pt(dev, pd->page_table[i]);
867                 pd->page_table[i] = NULL;
868         }
869 }
870
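/*
 * The scratch structures set up by gen8_init_scratch() below form a chain:
 * every PTE in scratch_pt points at scratch_page, every PDE in scratch_pd
 * points at scratch_pt, and (with 48-bit PPGTT) every PDPE in scratch_pdp
 * points at scratch_pd, so lookups through the scratch chain always resolve
 * to the scratch page.
 */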
871 static int gen8_init_scratch(struct i915_address_space *vm)
872 {
873         struct drm_device *dev = vm->dev;
874
875         vm->scratch_page = alloc_scratch_page(dev);
876         if (IS_ERR(vm->scratch_page))
877                 return PTR_ERR(vm->scratch_page);
878
879         vm->scratch_pt = alloc_pt(dev);
880         if (IS_ERR(vm->scratch_pt)) {
881                 free_scratch_page(dev, vm->scratch_page);
882                 return PTR_ERR(vm->scratch_pt);
883         }
884
885         vm->scratch_pd = alloc_pd(dev);
886         if (IS_ERR(vm->scratch_pd)) {
887                 free_pt(dev, vm->scratch_pt);
888                 free_scratch_page(dev, vm->scratch_page);
889                 return PTR_ERR(vm->scratch_pd);
890         }
891
892         if (USES_FULL_48BIT_PPGTT(dev)) {
893                 vm->scratch_pdp = alloc_pdp(dev);
894                 if (IS_ERR(vm->scratch_pdp)) {
895                         free_pd(dev, vm->scratch_pd);
896                         free_pt(dev, vm->scratch_pt);
897                         free_scratch_page(dev, vm->scratch_page);
898                         return PTR_ERR(vm->scratch_pdp);
899                 }
900         }
901
902         gen8_initialize_pt(vm, vm->scratch_pt);
903         gen8_initialize_pd(vm, vm->scratch_pd);
904         if (USES_FULL_48BIT_PPGTT(dev))
905                 gen8_initialize_pdp(vm, vm->scratch_pdp);
906
907         return 0;
908 }
909
910 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
911 {
912         enum vgt_g2v_type msg;
913         struct drm_device *dev = ppgtt->base.dev;
914         struct drm_i915_private *dev_priv = dev->dev_private;
915         int i;
916
917         if (USES_FULL_48BIT_PPGTT(dev)) {
918                 u64 daddr = px_dma(&ppgtt->pml4);
919
920                 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
921                 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
922
923                 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
924                                 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
925         } else {
926                 for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
927                         u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
928
929                         I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
930                         I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
931                 }
932
933                 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
934                                 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
935         }
936
937         I915_WRITE(vgtif_reg(g2v_notify), msg);
938
939         return 0;
940 }
941
942 static void gen8_free_scratch(struct i915_address_space *vm)
943 {
944         struct drm_device *dev = vm->dev;
945
946         if (USES_FULL_48BIT_PPGTT(dev))
947                 free_pdp(dev, vm->scratch_pdp);
948         free_pd(dev, vm->scratch_pd);
949         free_pt(dev, vm->scratch_pt);
950         free_scratch_page(dev, vm->scratch_page);
951 }
952
953 static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
954                                     struct i915_page_directory_pointer *pdp)
955 {
956         int i;
957
958         for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
959                 if (WARN_ON(!pdp->page_directory[i]))
960                         continue;
961
962                 gen8_free_page_tables(dev, pdp->page_directory[i]);
963                 free_pd(dev, pdp->page_directory[i]);
964         }
965
966         free_pdp(dev, pdp);
967 }
968
969 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
970 {
971         int i;
972
973         for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
974                 if (WARN_ON(!ppgtt->pml4.pdps[i]))
975                         continue;
976
977                 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
978         }
979
980         cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
981 }
982
983 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
984 {
985         struct i915_hw_ppgtt *ppgtt =
986                 container_of(vm, struct i915_hw_ppgtt, base);
987
988         if (intel_vgpu_active(vm->dev))
989                 gen8_ppgtt_notify_vgt(ppgtt, false);
990
991         if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
992                 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
993         else
994                 gen8_ppgtt_cleanup_4lvl(ppgtt);
995
996         gen8_free_scratch(vm);
997 }
998
999 /**
1000  * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
1001  * @vm: Master vm structure.
1002  * @pd: Page directory for this address range.
1003  * @start:      Starting virtual address to begin allocations.
1004  * @length:     Size of the allocations.
1005  * @new_pts:    Bitmap set by function with new allocations. Likely used by the
1006  *              caller to free on error.
1007  *
1008  * Allocate the required number of page tables. Extremely similar to
1009  * gen8_ppgtt_alloc_page_directories(). The main difference is that here we are limited by
1010  * the page directory boundary (instead of the page directory pointer). That
1011  * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
1012  * possible, and likely, that the caller will need to use multiple calls of this
1013  * function to achieve the appropriate allocation.
1014  *
1015  * Return: 0 if success; negative error code otherwise.
1016  */
1017 static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
1018                                      struct i915_page_directory *pd,
1019                                      uint64_t start,
1020                                      uint64_t length,
1021                                      unsigned long *new_pts)
1022 {
1023         struct drm_device *dev = vm->dev;
1024         struct i915_page_table *pt;
1025         uint32_t pde;
1026
1027         gen8_for_each_pde(pt, pd, start, length, pde) {
1028                 /* Don't reallocate page tables */
1029                 if (test_bit(pde, pd->used_pdes)) {
1030                         /* Scratch is never allocated this way */
1031                         WARN_ON(pt == vm->scratch_pt);
1032                         continue;
1033                 }
1034
1035                 pt = alloc_pt(dev);
1036                 if (IS_ERR(pt))
1037                         goto unwind_out;
1038
1039                 gen8_initialize_pt(vm, pt);
1040                 pd->page_table[pde] = pt;
1041                 __set_bit(pde, new_pts);
1042                 trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
1043         }
1044
1045         return 0;
1046
1047 unwind_out:
1048         for_each_set_bit(pde, new_pts, I915_PDES)
1049                 free_pt(dev, pd->page_table[pde]);
1050
1051         return -ENOMEM;
1052 }
1053
1054 /**
1055  * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
1056  * @vm: Master vm structure.
1057  * @pdp:        Page directory pointer for this address range.
1058  * @start:      Starting virtual address to begin allocations.
1059  * @length:     Size of the allocations.
1060  * @new_pds:    Bitmap set by function with new allocations. Likely used by the
1061  *              caller to free on error.
1062  *
1063  * Allocate the required number of page directories starting at the pde index of
1064  * @start, and ending at the pde index of @start + @length. This function will skip
1065  * over already allocated page directories within the range, and only allocate
1066  * new ones, setting the appropriate pointer within the pdp as well as the
1067  * correct position in the bitmap @new_pds.
1068  *
1069  * The function will only allocate the pages within the range for a given page
1070  * directory pointer. In other words, if @start + @length straddles a virtually
1071  * addressed PDP boundary (512GB for 4k pages), there will be more allocations
1072  * required by the caller. This is not currently possible, and the BUG in the
1073  * code will prevent it.
1074  *
1075  * Return: 0 if success; negative error code otherwise.
1076  */
1077 static int
1078 gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
1079                                   struct i915_page_directory_pointer *pdp,
1080                                   uint64_t start,
1081                                   uint64_t length,
1082                                   unsigned long *new_pds)
1083 {
1084         struct drm_device *dev = vm->dev;
1085         struct i915_page_directory *pd;
1086         uint32_t pdpe;
1087         uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1088
1089         WARN_ON(!bitmap_empty(new_pds, pdpes));
1090
1091         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1092                 if (test_bit(pdpe, pdp->used_pdpes))
1093                         continue;
1094
1095                 pd = alloc_pd(dev);
1096                 if (IS_ERR(pd))
1097                         goto unwind_out;
1098
1099                 gen8_initialize_pd(vm, pd);
1100                 pdp->page_directory[pdpe] = pd;
1101                 __set_bit(pdpe, new_pds);
1102                 trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
1103         }
1104
1105         return 0;
1106
1107 unwind_out:
1108         for_each_set_bit(pdpe, new_pds, pdpes)
1109                 free_pd(dev, pdp->page_directory[pdpe]);
1110
1111         return -ENOMEM;
1112 }
1113
1114 /**
1115  * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
1116  * @vm: Master vm structure.
1117  * @pml4:       Page map level 4 for this address range.
1118  * @start:      Starting virtual address to begin allocations.
1119  * @length:     Size of the allocations.
1120  * @new_pdps:   Bitmap set by function with new allocations. Likely used by the
1121  *              caller to free on error.
1122  *
1123  * Allocate the required number of page directory pointers. Extremely similar to
1124  * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
1125  * The main difference is here we are limited by the pml4 boundary (instead of
1126  * the page directory pointer).
1127  *
1128  * Return: 0 if success; negative error code otherwise.
1129  */
1130 static int
1131 gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
1132                                   struct i915_pml4 *pml4,
1133                                   uint64_t start,
1134                                   uint64_t length,
1135                                   unsigned long *new_pdps)
1136 {
1137         struct drm_device *dev = vm->dev;
1138         struct i915_page_directory_pointer *pdp;
1139         uint32_t pml4e;
1140
1141         WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
1142
1143         gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1144                 if (!test_bit(pml4e, pml4->used_pml4es)) {
1145                         pdp = alloc_pdp(dev);
1146                         if (IS_ERR(pdp))
1147                                 goto unwind_out;
1148
1149                         gen8_initialize_pdp(vm, pdp);
1150                         pml4->pdps[pml4e] = pdp;
1151                         __set_bit(pml4e, new_pdps);
1152                         trace_i915_page_directory_pointer_entry_alloc(vm,
1153                                                                       pml4e,
1154                                                                       start,
1155                                                                       GEN8_PML4E_SHIFT);
1156                 }
1157         }
1158
1159         return 0;
1160
1161 unwind_out:
1162         for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1163                 free_pdp(dev, pml4->pdps[pml4e]);
1164
1165         return -ENOMEM;
1166 }
1167
1168 static void
1169 free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
1170 {
1171         kfree(new_pts);
1172         kfree(new_pds);
1173 }
1174
1175 /* Fills in the page directory bitmap and the array of page table bitmaps. Both
1176  * of these are sized based on the number of PDPEs in the system.
1177  */
1178 static
1179 int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
1180                                          unsigned long **new_pts,
1181                                          uint32_t pdpes)
1182 {
1183         unsigned long *pds;
1184         unsigned long *pts;
1185
1186         pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
1187         if (!pds)
1188                 return -ENOMEM;
1189
1190         pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
1191                       GFP_TEMPORARY);
1192         if (!pts)
1193                 goto err_out;
1194
1195         *new_pds = pds;
1196         *new_pts = pts;
1197
1198         return 0;
1199
1200 err_out:
1201         free_gen8_temp_bitmaps(pds, pts);
1202         return -ENOMEM;
1203 }
1204
1205 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
1206  * the page table structures, we mark them dirty so that
1207  * context switching/execlist queuing code takes extra steps
1208  * to ensure that tlbs are flushed.
1209  */
1210 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
1211 {
1212         ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
1213 }
1214
1215 static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
1216                                     struct i915_page_directory_pointer *pdp,
1217                                     uint64_t start,
1218                                     uint64_t length)
1219 {
1220         struct i915_hw_ppgtt *ppgtt =
1221                 container_of(vm, struct i915_hw_ppgtt, base);
1222         unsigned long *new_page_dirs, *new_page_tables;
1223         struct drm_device *dev = vm->dev;
1224         struct i915_page_directory *pd;
1225         const uint64_t orig_start = start;
1226         const uint64_t orig_length = length;
1227         uint32_t pdpe;
1228         uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1229         int ret;
1230
1231         /* Wrap is never okay since we can only represent 48b, and we don't
1232          * actually use the other side of the canonical address space.
1233          */
1234         if (WARN_ON(start + length < start))
1235                 return -ENODEV;
1236
1237         if (WARN_ON(start + length > vm->total))
1238                 return -ENODEV;
1239
1240         ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1241         if (ret)
1242                 return ret;
1243
1244         /* Do the allocations first so we can easily bail out */
1245         ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
1246                                                 new_page_dirs);
1247         if (ret) {
1248                 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1249                 return ret;
1250         }
1251
1252         /* For every page directory referenced, allocate page tables */
1253         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1254                 ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
1255                                                 new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
1256                 if (ret)
1257                         goto err_out;
1258         }
1259
1260         start = orig_start;
1261         length = orig_length;
1262
1263         /* Allocations have completed successfully, so set the bitmaps, and do
1264          * the mappings. */
1265         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1266                 gen8_pde_t *const page_directory = kmap_px(pd);
1267                 struct i915_page_table *pt;
1268                 uint64_t pd_len = length;
1269                 uint64_t pd_start = start;
1270                 uint32_t pde;
1271
1272                 /* Every pd should be allocated; we just did that above. */
1273                 WARN_ON(!pd);
1274
1275                 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1276                         /* Same reasoning as pd */
1277                         WARN_ON(!pt);
1278                         if (pt == NULL)         /* XXX dillon hack */
1279                                 continue;       /* XXX dillon hack */
1280                         WARN_ON(!pd_len);
1281                         WARN_ON(!gen8_pte_count(pd_start, pd_len));
1282
1283                         /* Set our used ptes within the page table */
1284                         bitmap_set(pt->used_ptes,
1285                                    gen8_pte_index(pd_start),
1286                                    gen8_pte_count(pd_start, pd_len));
1287
1288                         /* Our pde is now pointing to the pagetable, pt */
1289                         __set_bit(pde, pd->used_pdes);
1290
1291                         /* Map the PDE to the page table */
1292                         page_directory[pde] = gen8_pde_encode(px_dma(pt),
1293                                                               I915_CACHE_LLC);
1294                         trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
1295                                                         gen8_pte_index(start),
1296                                                         gen8_pte_count(start, length),
1297                                                         GEN8_PTES);
1298
1299                         /* NB: We haven't yet mapped ptes to pages. At this
1300                          * point we're still relying on insert_entries() */
1301                 }
1302
1303                 kunmap_px(ppgtt, page_directory);
1304                 __set_bit(pdpe, pdp->used_pdpes);
1305                 gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
1306         }
1307
1308         free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1309         mark_tlbs_dirty(ppgtt);
1310         return 0;
1311
1312 err_out:
1313         while (pdpe--) {
1314                 unsigned long temp;
1315
1316                 for_each_set_bit(temp, new_page_tables + pdpe *
1317                                 BITS_TO_LONGS(I915_PDES), I915_PDES)
1318                         free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
1319         }
1320
1321         for_each_set_bit(pdpe, new_page_dirs, pdpes)
1322                 free_pd(dev, pdp->page_directory[pdpe]);
1323
1324         free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1325         mark_tlbs_dirty(ppgtt);
1326         return ret;
1327 }
1328
1329 static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
1330                                     struct i915_pml4 *pml4,
1331                                     uint64_t start,
1332                                     uint64_t length)
1333 {
1334         DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
1335         struct i915_hw_ppgtt *ppgtt =
1336                         container_of(vm, struct i915_hw_ppgtt, base);
1337         struct i915_page_directory_pointer *pdp;
1338         uint64_t pml4e;
1339         int ret = 0;
1340
1341         /* Do the pml4 allocations first, so we don't need to track the newly
1342          * allocated tables below the pdp */
1343         bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
1344
1345         /* The pagedirectory and pagetable allocations are done in the shared 3
1346          * and 4 level code. Just allocate the pdps.
1347          */
1348         ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
1349                                                 new_pdps);
1350         if (ret)
1351                 return ret;
1352
1353         WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
1354              "The allocation has spanned more than 512GB. "
1355              "It is highly likely this is incorrect.");
1356
1357         gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1358                 WARN_ON(!pdp);
1359
1360                 ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
1361                 if (ret)
1362                         goto err_out;
1363
1364                 gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
1365         }
1366
1367         bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
1368                   GEN8_PML4ES_PER_PML4);
1369
1370         return 0;
1371
1372 err_out:
1373         for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1374                 gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
1375
1376         return ret;
1377 }
1378
1379 static int gen8_alloc_va_range(struct i915_address_space *vm,
1380                                uint64_t start, uint64_t length)
1381 {
1382         struct i915_hw_ppgtt *ppgtt =
1383                 container_of(vm, struct i915_hw_ppgtt, base);
1384
1385         if (USES_FULL_48BIT_PPGTT(vm->dev))
1386                 return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
1387         else
1388                 return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
1389 }
1390
1391 static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
1392                           uint64_t start, uint64_t length,
1393                           gen8_pte_t scratch_pte,
1394                           struct seq_file *m)
1395 {
1396         struct i915_page_directory *pd;
1397         uint32_t pdpe;
1398
1399         gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1400                 struct i915_page_table *pt;
1401                 uint64_t pd_len = length;
1402                 uint64_t pd_start = start;
1403                 uint32_t pde;
1404
1405                 if (!test_bit(pdpe, pdp->used_pdpes))
1406                         continue;
1407
1408                 seq_printf(m, "\tPDPE #%d\n", pdpe);
1409                 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1410                         uint32_t  pte;
1411                         gen8_pte_t *pt_vaddr;
1412
1413                         if (!test_bit(pde, pd->used_pdes))
1414                                 continue;
1415
1416                         pt_vaddr = kmap_px(pt);
1417                         for (pte = 0; pte < GEN8_PTES; pte += 4) {
1418                                 uint64_t va =
1419                                         (pdpe << GEN8_PDPE_SHIFT) |
1420                                         (pde << GEN8_PDE_SHIFT) |
1421                                         (pte << GEN8_PTE_SHIFT);
1422                                 int i;
1423                                 bool found = false;
1424
1425                                 for (i = 0; i < 4; i++)
1426                                         if (pt_vaddr[pte + i] != scratch_pte)
1427                                                 found = true;
1428                                 if (!found)
1429                                         continue;
1430
1431                                 seq_printf(m, "\t\t0x%lx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1432                                 for (i = 0; i < 4; i++) {
1433                                         if (pt_vaddr[pte + i] != scratch_pte)
1434                                                 seq_printf(m, " %lx", pt_vaddr[pte + i]);
1435                                         else
1436                                                 seq_puts(m, "  SCRATCH ");
1437                                 }
1438                                 seq_puts(m, "\n");
1439                         }
1440                         /* don't use kunmap_px, it could trigger
1441                          * an unnecessary flush.
1442                          */
1443                         kunmap_atomic(pt_vaddr);
1444                 }
1445         }
1446 }
1447
1448 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1449 {
1450         struct i915_address_space *vm = &ppgtt->base;
1451         uint64_t start = ppgtt->base.start;
1452         uint64_t length = ppgtt->base.total;
1453         gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
1454                                                  I915_CACHE_LLC, true);
1455
1456         if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
1457                 gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
1458         } else {
1459                 uint64_t pml4e;
1460                 struct i915_pml4 *pml4 = &ppgtt->pml4;
1461                 struct i915_page_directory_pointer *pdp;
1462
1463                 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1464                         if (!test_bit(pml4e, pml4->used_pml4es))
1465                                 continue;
1466
1467                         seq_printf(m, "    PML4E #%lu\n", pml4e);
1468                         gen8_dump_pdp(pdp, start, length, scratch_pte, m);
1469                 }
1470         }
1471 }
1472
1473 static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
1474 {
1475         unsigned long *new_page_dirs, *new_page_tables;
1476         uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1477         int ret;
1478
1479         /* We allocate a temp bitmap for page tables for no gain,
1480          * but as this is for init only, let's keep things simple.
1481          */
1482         ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1483         if (ret)
1484                 return ret;
1485
1486         /* Allocate for all pdps regardless of how the ppgtt
1487          * was defined.
1488          */
1489         ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
1490                                                 0, 1ULL << 32,
1491                                                 new_page_dirs);
1492         if (!ret)
1493                 *ppgtt->pdp.used_pdpes = *new_page_dirs;
1494
1495         free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1496
1497         return ret;
1498 }
1499
1500 /*
1501  * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
1502  * registers, with a net effect resembling a 2-level page table in normal x86
1503  * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of
1504  * legacy 32b address space.
1505  *
1506  */
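/*
 * For reference, the gen8_*_index() helpers used throughout decompose a
 * 48-bit GPU virtual address as
 *
 *	pml4e[47:39] | pdpe[38:30] | pde[29:21] | pte[20:12] | offset[11:0]
 *
 * In legacy 32b mode there is no pml4; the top level is a single pdp with
 * at most 4 entries selected by bits [31:30].
 */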
1507 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1508 {
1509         int ret;
1510
1511         ret = gen8_init_scratch(&ppgtt->base);
1512         if (ret)
1513                 return ret;
1514
1515         ppgtt->base.start = 0;
1516         ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1517         ppgtt->base.allocate_va_range = gen8_alloc_va_range;
1518         ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
1519         ppgtt->base.clear_range = gen8_ppgtt_clear_range;
1520         ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1521         ppgtt->base.bind_vma = ppgtt_bind_vma;
1522         ppgtt->debug_dump = gen8_dump_ppgtt;
1523
1524         if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
1525                 ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
1526                 if (ret)
1527                         goto free_scratch;
1528
1529                 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1530
1531                 ppgtt->base.total = 1ULL << 48;
1532                 ppgtt->switch_mm = gen8_48b_mm_switch;
1533         } else {
1534                 ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
1535                 if (ret)
1536                         goto free_scratch;
1537
1538                 ppgtt->base.total = 1ULL << 32;
1539                 ppgtt->switch_mm = gen8_legacy_mm_switch;
1540                 trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
1541                                                               0, 0,
1542                                                               GEN8_PML4E_SHIFT);
1543
1544                 if (intel_vgpu_active(ppgtt->base.dev)) {
1545                         ret = gen8_preallocate_top_level_pdps(ppgtt);
1546                         if (ret)
1547                                 goto free_scratch;
1548                 }
1549         }
1550
1551         if (intel_vgpu_active(ppgtt->base.dev))
1552                 gen8_ppgtt_notify_vgt(ppgtt, true);
1553
1554         return 0;
1555
1556 free_scratch:
1557         gen8_free_scratch(&ppgtt->base);
1558         return ret;
1559 }
1560
1561 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1562 {
1563         struct i915_address_space *vm = &ppgtt->base;
1564         struct i915_page_table *unused;
1565         gen6_pte_t scratch_pte;
1566         uint32_t pd_entry;
1567         uint32_t pte, pde, temp;
1568         uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
1569
1570         scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1571                                      I915_CACHE_LLC, true, 0);
1572
1573         gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
1574                 u32 expected;
1575                 gen6_pte_t *pt_vaddr;
1576                 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
1577                 pd_entry = readl(ppgtt->pd_addr + pde);
1578                 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1579
1580                 if (pd_entry != expected)
1581                         seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1582                                    pde,
1583                                    pd_entry,
1584                                    expected);
1585                 seq_printf(m, "\tPDE: %x\n", pd_entry);
1586
1587                 pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
1588
1589                 for (pte = 0; pte < GEN6_PTES; pte += 4) {
1590                         unsigned long va =
1591                                 (pde * PAGE_SIZE * GEN6_PTES) +
1592                                 (pte * PAGE_SIZE);
1593                         int i;
1594                         bool found = false;
1595                         for (i = 0; i < 4; i++)
1596                                 if (pt_vaddr[pte + i] != scratch_pte)
1597                                         found = true;
1598                         if (!found)
1599                                 continue;
1600
1601                         seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1602                         for (i = 0; i < 4; i++) {
1603                                 if (pt_vaddr[pte + i] != scratch_pte)
1604                                         seq_printf(m, " %08x", pt_vaddr[pte + i]);
1605                                 else
1606                                         seq_puts(m, "  SCRATCH ");
1607                         }
1608                         seq_puts(m, "\n");
1609                 }
1610                 kunmap_px(ppgtt, pt_vaddr);
1611         }
1612 }
1613
1614 /* Write the PDE at index @pde in the page directory @pd so that it points at the page table @pt */
1615 static void gen6_write_pde(struct i915_page_directory *pd,
1616                             const int pde, struct i915_page_table *pt)
1617 {
1618         /* Caller needs to make sure the write completes if necessary */
1619         struct i915_hw_ppgtt *ppgtt =
1620                 container_of(pd, struct i915_hw_ppgtt, pd);
1621         u32 pd_entry;
1622
1623         pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
1624         pd_entry |= GEN6_PDE_VALID;
1625
1626         writel(pd_entry, ppgtt->pd_addr + pde);
1627 }
1628
1629 /* Write out the PDEs for all page tables found in the page directory @pd,
1630  * one per consecutive PDE slot, for the range [start, start + length). */
1631 static void gen6_write_page_range(struct drm_i915_private *dev_priv,
1632                                   struct i915_page_directory *pd,
1633                                   uint32_t start, uint32_t length)
1634 {
1635         struct i915_page_table *pt;
1636         uint32_t pde, temp;
1637
1638         gen6_for_each_pde(pt, pd, start, length, temp, pde)
1639                 gen6_write_pde(pd, pde, pt);
1640
1641         /* Make sure write is complete before other code can use this page
1642          * table. Also required for WC mapped PTEs */
1643         readl(dev_priv->gtt.gsm);
1644 }
1645
1646 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
1647 {
1648         BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1649
1650         return (ppgtt->pd.base.ggtt_offset / 64) << 16;
1651 }
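/*
 * Worked example for get_pd_offset() above (editor-added, illustrative):
 * with the page directory at GGTT offset 0x10000, the function returns
 * (0x10000 / 64) << 16 == 0x04000000, i.e. the offset expressed in 64-byte
 * cachelines, shifted into the upper bits of the value that the mm-switch
 * routines below write to RING_PP_DIR_BASE.
 */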
1652
1653 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1654                          struct drm_i915_gem_request *req)
1655 {
1656         struct intel_engine_cs *ring = req->ring;
1657         int ret;
1658
1659         /* NB: TLBs must be flushed and invalidated before a switch */
1660         ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1661         if (ret)
1662                 return ret;
1663
1664         ret = intel_ring_begin(req, 6);
1665         if (ret)
1666                 return ret;
1667
1668         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1669         intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
1670         intel_ring_emit(ring, PP_DIR_DCLV_2G);
1671         intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
1672         intel_ring_emit(ring, get_pd_offset(ppgtt));
1673         intel_ring_emit(ring, MI_NOOP);
1674         intel_ring_advance(ring);
1675
1676         return 0;
1677 }
1678
1679 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
1680                           struct drm_i915_gem_request *req)
1681 {
1682         struct intel_engine_cs *ring = req->ring;
1683         struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
1684
1685         I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
1686         I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
1687         return 0;
1688 }
1689
1690 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1691                           struct drm_i915_gem_request *req)
1692 {
1693         struct intel_engine_cs *ring = req->ring;
1694         int ret;
1695
1696         /* NB: TLBs must be flushed and invalidated before a switch */
1697         ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1698         if (ret)
1699                 return ret;
1700
1701         ret = intel_ring_begin(req, 6);
1702         if (ret)
1703                 return ret;
1704
1705         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1706         intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
1707         intel_ring_emit(ring, PP_DIR_DCLV_2G);
1708         intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
1709         intel_ring_emit(ring, get_pd_offset(ppgtt));
1710         intel_ring_emit(ring, MI_NOOP);
1711         intel_ring_advance(ring);
1712
1713         /* XXX: RCS is the only one to auto invalidate the TLBs? */
1714         if (ring->id != RCS) {
1715                 ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1716                 if (ret)
1717                         return ret;
1718         }
1719
1720         return 0;
1721 }
1722
1723 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1724                           struct drm_i915_gem_request *req)
1725 {
1726         struct intel_engine_cs *ring = req->ring;
1727         struct drm_device *dev = ppgtt->base.dev;
1728         struct drm_i915_private *dev_priv = dev->dev_private;
1729
1730
1731         I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
1732         I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
1733
1734         POSTING_READ(RING_PP_DIR_DCLV(ring));
1735
1736         return 0;
1737 }
1738
1739 static void gen8_ppgtt_enable(struct drm_device *dev)
1740 {
1741         struct drm_i915_private *dev_priv = dev->dev_private;
1742         struct intel_engine_cs *ring;
1743         int j;
1744
1745         for_each_ring(ring, dev_priv, j) {
1746                 u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
1747                 I915_WRITE(RING_MODE_GEN7(ring),
1748                            _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1749         }
1750 }
1751
1752 static void gen7_ppgtt_enable(struct drm_device *dev)
1753 {
1754         struct drm_i915_private *dev_priv = dev->dev_private;
1755         struct intel_engine_cs *ring;
1756         uint32_t ecochk, ecobits;
1757         int i;
1758
1759         ecobits = I915_READ(GAC_ECO_BITS);
1760         I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1761
1762         ecochk = I915_READ(GAM_ECOCHK);
1763         if (IS_HASWELL(dev)) {
1764                 ecochk |= ECOCHK_PPGTT_WB_HSW;
1765         } else {
1766                 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1767                 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1768         }
1769         I915_WRITE(GAM_ECOCHK, ecochk);
1770
1771         for_each_ring(ring, dev_priv, i) {
1772                 /* GFX_MODE is per-ring on gen7+ */
1773                 I915_WRITE(RING_MODE_GEN7(ring),
1774                            _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1775         }
1776 }
1777
1778 static void gen6_ppgtt_enable(struct drm_device *dev)
1779 {
1780         struct drm_i915_private *dev_priv = dev->dev_private;
1781         uint32_t ecochk, gab_ctl, ecobits;
1782
1783         ecobits = I915_READ(GAC_ECO_BITS);
1784         I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1785                    ECOBITS_PPGTT_CACHE64B);
1786
1787         gab_ctl = I915_READ(GAB_CTL);
1788         I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1789
1790         ecochk = I915_READ(GAM_ECOCHK);
1791         I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1792
1793         I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1794 }
1795
1796 /* PPGTT support for Sandybridge/Gen6 and later */
1797 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1798                                    uint64_t start,
1799                                    uint64_t length,
1800                                    bool use_scratch)
1801 {
1802         struct i915_hw_ppgtt *ppgtt =
1803                 container_of(vm, struct i915_hw_ppgtt, base);
1804         gen6_pte_t *pt_vaddr, scratch_pte;
1805         unsigned first_entry = start >> PAGE_SHIFT;
1806         unsigned num_entries = length >> PAGE_SHIFT;
1807         unsigned act_pt = first_entry / GEN6_PTES;
1808         unsigned first_pte = first_entry % GEN6_PTES;
1809         unsigned last_pte, i;
1810
1811         scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1812                                      I915_CACHE_LLC, true, 0);
1813
1814         while (num_entries) {
1815                 last_pte = first_pte + num_entries;
1816                 if (last_pte > GEN6_PTES)
1817                         last_pte = GEN6_PTES;
1818
1819                 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1820
1821                 for (i = first_pte; i < last_pte; i++)
1822                         pt_vaddr[i] = scratch_pte;
1823
1824                 kunmap_px(ppgtt, pt_vaddr);
1825
1826                 num_entries -= last_pte - first_pte;
1827                 first_pte = 0;
1828                 act_pt++;
1829         }
1830 }
1831
1832 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1833                                       struct sg_table *pages,
1834                                       uint64_t start,
1835                                       enum i915_cache_level cache_level, u32 flags)
1836 {
1837         struct i915_hw_ppgtt *ppgtt =
1838                 container_of(vm, struct i915_hw_ppgtt, base);
1839         gen6_pte_t *pt_vaddr;
1840         unsigned first_entry = start >> PAGE_SHIFT;
1841         unsigned act_pt = first_entry / GEN6_PTES;
1842         unsigned act_pte = first_entry % GEN6_PTES;
1843         struct sg_page_iter sg_iter;
1844
1845         pt_vaddr = NULL;
1846         for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
1847                 if (pt_vaddr == NULL)
1848                         pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1849
1850                 pt_vaddr[act_pte] =
1851                         vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
1852                                        cache_level, true, flags);
1853
1854                 if (++act_pte == GEN6_PTES) {
1855                         kunmap_px(ppgtt, pt_vaddr);
1856                         pt_vaddr = NULL;
1857                         act_pt++;
1858                         act_pte = 0;
1859                 }
1860         }
1861         if (pt_vaddr)
1862                 kunmap_px(ppgtt, pt_vaddr);
1863 }
1864
1865 static int gen6_alloc_va_range(struct i915_address_space *vm,
1866                                uint64_t start_in, uint64_t length_in)
1867 {
1868         DECLARE_BITMAP(new_page_tables, I915_PDES);
1869         struct drm_device *dev = vm->dev;
1870         struct drm_i915_private *dev_priv = dev->dev_private;
1871         struct i915_hw_ppgtt *ppgtt =
1872                                 container_of(vm, struct i915_hw_ppgtt, base);
1873         struct i915_page_table *pt;
1874         uint32_t start, length, start_save, length_save;
1875         uint32_t pde, temp;
1876         int ret;
1877
1878         if (WARN_ON(start_in + length_in > ppgtt->base.total))
1879                 return -ENODEV;
1880
1881         start = start_save = start_in;
1882         length = length_save = length_in;
1883
1884         bitmap_zero(new_page_tables, I915_PDES);
1885
1886         /* The allocation is done in two stages so that we can bail out with
1887          * minimal amount of pain. The first stage finds new page tables that
1888          * need allocation. The second stage marks the ptes in use within the page
1889          * tables.
1890          */
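	/*
	 * Editor's example (assuming each page table spans GEN6_PTES (1024)
	 * 4KiB pages, i.e. 4MiB): allocating [0, 8MiB) walks PDEs 0 and 1;
	 * any of those still pointing at the scratch PT gets a freshly
	 * allocated PT in stage one, and stage two then sets the matching
	 * bits in each PT's used_ptes bitmap.
	 */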
1891         gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1892                 if (pt != vm->scratch_pt) {
1893                         WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
1894                         continue;
1895                 }
1896
1897                 /* We've already allocated a page table */
1898                 WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
1899
1900                 pt = alloc_pt(dev);
1901                 if (IS_ERR(pt)) {
1902                         ret = PTR_ERR(pt);
1903                         goto unwind_out;
1904                 }
1905
1906                 gen6_initialize_pt(vm, pt);
1907
1908                 ppgtt->pd.page_table[pde] = pt;
1909                 __set_bit(pde, new_page_tables);
1910                 trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
1911         }
1912
1913         start = start_save;
1914         length = length_save;
1915
1916         gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
1917                 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
1918
1919                 bitmap_zero(tmp_bitmap, GEN6_PTES);
1920                 bitmap_set(tmp_bitmap, gen6_pte_index(start),
1921                            gen6_pte_count(start, length));
1922
1923                 if (__test_and_clear_bit(pde, new_page_tables))
1924                         gen6_write_pde(&ppgtt->pd, pde, pt);
1925
1926                 trace_i915_page_table_entry_map(vm, pde, pt,
1927                                          gen6_pte_index(start),
1928                                          gen6_pte_count(start, length),
1929                                          GEN6_PTES);
1930                 bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
1931                                 GEN6_PTES);
1932         }
1933
1934         WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
1935
1936         /* Make sure write is complete before other code can use this page
1937          * table. Also required for WC mapped PTEs */
1938         readl(dev_priv->gtt.gsm);
1939
1940         mark_tlbs_dirty(ppgtt);
1941         return 0;
1942
1943 unwind_out:
1944         for_each_set_bit(pde, new_page_tables, I915_PDES) {
1945                 struct i915_page_table *pt = ppgtt->pd.page_table[pde];
1946
1947                 ppgtt->pd.page_table[pde] = vm->scratch_pt;
1948                 free_pt(vm->dev, pt);
1949         }
1950
1951         mark_tlbs_dirty(ppgtt);
1952         return ret;
1953 }
1954
1955 static int gen6_init_scratch(struct i915_address_space *vm)
1956 {
1957         struct drm_device *dev = vm->dev;
1958
1959         vm->scratch_page = alloc_scratch_page(dev);
1960         if (IS_ERR(vm->scratch_page))
1961                 return PTR_ERR(vm->scratch_page);
1962
1963         vm->scratch_pt = alloc_pt(dev);
1964         if (IS_ERR(vm->scratch_pt)) {
1965                 free_scratch_page(dev, vm->scratch_page);
1966                 return PTR_ERR(vm->scratch_pt);
1967         }
1968
1969         gen6_initialize_pt(vm, vm->scratch_pt);
1970
1971         return 0;
1972 }
1973
1974 static void gen6_free_scratch(struct i915_address_space *vm)
1975 {
1976         struct drm_device *dev = vm->dev;
1977
1978         free_pt(dev, vm->scratch_pt);
1979         free_scratch_page(dev, vm->scratch_page);
1980 }
1981
1982 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1983 {
1984         struct i915_hw_ppgtt *ppgtt =
1985                 container_of(vm, struct i915_hw_ppgtt, base);
1986         struct i915_page_table *pt;
1987         uint32_t pde;
1988
1989         drm_mm_remove_node(&ppgtt->node);
1990
1991         gen6_for_all_pdes(pt, ppgtt, pde) {
1992                 if (pt != vm->scratch_pt)
1993                         free_pt(ppgtt->base.dev, pt);
1994         }
1995
1996         gen6_free_scratch(vm);
1997 }
1998
1999 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
2000 {
2001         struct i915_address_space *vm = &ppgtt->base;
2002         struct drm_device *dev = ppgtt->base.dev;
2003         struct drm_i915_private *dev_priv = dev->dev_private;
2004         bool retried = false;
2005         int ret;
2006
2007         /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
2008          * allocator works in address space sizes, so it's multiplied by page
2009          * size. We allocate at the top of the GTT to avoid fragmentation.
2010          */
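	/*
	 * Editor's note (assumption): GEN6_PD_SIZE is presumably
	 * I915_PDES (512) * PAGE_SIZE == 2MiB of GGTT address range, even
	 * though the 512 PDEs themselves only occupy 2KiB -- the drm_mm
	 * allocator below hands out address space, not backing storage.
	 */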
2011         BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
2012
2013         ret = gen6_init_scratch(vm);
2014         if (ret)
2015                 return ret;
2016
2017 alloc:
2018         ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
2019                                                   &ppgtt->node, GEN6_PD_SIZE,
2020                                                   GEN6_PD_ALIGN, 0,
2021                                                   0, dev_priv->gtt.base.total,
2022                                                   DRM_MM_TOPDOWN);
2023         if (ret == -ENOSPC && !retried) {
2024                 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
2025                                                GEN6_PD_SIZE, GEN6_PD_ALIGN,
2026                                                I915_CACHE_NONE,
2027                                                0, dev_priv->gtt.base.total,
2028                                                0);
2029                 if (ret)
2030                         goto err_out;
2031
2032                 retried = true;
2033                 goto alloc;
2034         }
2035
2036         if (ret)
2037                 goto err_out;
2038
2039
2040         if (ppgtt->node.start < dev_priv->gtt.mappable_end)
2041                 DRM_DEBUG("Forced to use aperture for PDEs\n");
2042
2043         return 0;
2044
2045 err_out:
2046         gen6_free_scratch(vm);
2047         return ret;
2048 }
2049
2050 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2051 {
2052         return gen6_ppgtt_allocate_page_directories(ppgtt);
2053 }
2054
2055 static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2056                                   uint64_t start, uint64_t length)
2057 {
2058         struct i915_page_table *unused;
2059         uint32_t pde, temp;
2060
2061         gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
2062                 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
2063 }
2064
2065 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
2066 {
2067         struct drm_device *dev = ppgtt->base.dev;
2068         struct drm_i915_private *dev_priv = dev->dev_private;
2069         int ret;
2070
2071         ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
2072         if (IS_GEN6(dev)) {
2073                 ppgtt->switch_mm = gen6_mm_switch;
2074         } else if (IS_HASWELL(dev)) {
2075                 ppgtt->switch_mm = hsw_mm_switch;
2076         } else if (IS_GEN7(dev)) {
2077                 ppgtt->switch_mm = gen7_mm_switch;
2078         } else
2079                 BUG();
2080
2081         if (intel_vgpu_active(dev))
2082                 ppgtt->switch_mm = vgpu_mm_switch;
2083
2084         ret = gen6_ppgtt_alloc(ppgtt);
2085         if (ret)
2086                 return ret;
2087
2088         ppgtt->base.allocate_va_range = gen6_alloc_va_range;
2089         ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2090         ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
2091         ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2092         ppgtt->base.bind_vma = ppgtt_bind_vma;
2093         ppgtt->base.cleanup = gen6_ppgtt_cleanup;
2094         ppgtt->base.start = 0;
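	/*
	 * Editor's arithmetic (assuming I915_PDES == 512 and GEN6_PTES ==
	 * 1024): 512 * 1024 * 4096 bytes == 2GiB of PPGTT address space.
	 */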
2095         ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
2096         ppgtt->debug_dump = gen6_dump_ppgtt;
2097
2098         ppgtt->pd.base.ggtt_offset =
2099                 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
2100
2101         ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
2102                 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
2103
2104         gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
2105
2106         gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
2107
2108         DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
2109                          ppgtt->node.size >> 20,
2110                          ppgtt->node.start / PAGE_SIZE);
2111
2112         DRM_DEBUG("Adding PPGTT at offset %x\n",
2113                   ppgtt->pd.base.ggtt_offset << 10);
2114
2115         return 0;
2116 }
2117
2118 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2119 {
2120         ppgtt->base.dev = dev;
2121
2122         if (INTEL_INFO(dev)->gen < 8)
2123                 return gen6_ppgtt_init(ppgtt);
2124         else
2125                 return gen8_ppgtt_init(ppgtt);
2126 }
2127
2128 static void i915_address_space_init(struct i915_address_space *vm,
2129                                     struct drm_i915_private *dev_priv)
2130 {
2131         drm_mm_init(&vm->mm, vm->start, vm->total);
2132         vm->dev = dev_priv->dev;
2133         INIT_LIST_HEAD(&vm->active_list);
2134         INIT_LIST_HEAD(&vm->inactive_list);
2135         list_add_tail(&vm->global_link, &dev_priv->vm_list);
2136 }
2137
2138 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2139 {
2140         struct drm_i915_private *dev_priv = dev->dev_private;
2141         int ret = 0;
2142
2143         ret = __hw_ppgtt_init(dev, ppgtt);
2144         if (ret == 0) {
2145                 kref_init(&ppgtt->ref);
2146                 i915_address_space_init(&ppgtt->base, dev_priv);
2147         }
2148
2149         return ret;
2150 }
2151
2152 int i915_ppgtt_init_hw(struct drm_device *dev)
2153 {
2154         /* In the case of execlists, PPGTT is enabled by the context descriptor
2155          * and the PDPs are contained within the context itself.  We don't
2156          * need to do anything here. */
2157         if (i915.enable_execlists)
2158                 return 0;
2159
2160         if (!USES_PPGTT(dev))
2161                 return 0;
2162
2163         if (IS_GEN6(dev))
2164                 gen6_ppgtt_enable(dev);
2165         else if (IS_GEN7(dev))
2166                 gen7_ppgtt_enable(dev);
2167         else if (INTEL_INFO(dev)->gen >= 8)
2168                 gen8_ppgtt_enable(dev);
2169         else
2170                 MISSING_CASE(INTEL_INFO(dev)->gen);
2171
2172         return 0;
2173 }
2174
2175 int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
2176 {
2177         struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
2178         struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2179
2180         if (i915.enable_execlists)
2181                 return 0;
2182
2183         if (!ppgtt)
2184                 return 0;
2185
2186         return ppgtt->switch_mm(ppgtt, req);
2187 }
2188
2189 struct i915_hw_ppgtt *
2190 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
2191 {
2192         struct i915_hw_ppgtt *ppgtt;
2193         int ret;
2194
2195         ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2196         if (!ppgtt)
2197                 return ERR_PTR(-ENOMEM);
2198
2199         ret = i915_ppgtt_init(dev, ppgtt);
2200         if (ret) {
2201                 kfree(ppgtt);
2202                 return ERR_PTR(ret);
2203         }
2204
2205         ppgtt->file_priv = fpriv;
2206
2207         trace_i915_ppgtt_create(&ppgtt->base);
2208
2209         return ppgtt;
2210 }
2211
2212 void i915_ppgtt_release(struct kref *kref)
2213 {
2214         struct i915_hw_ppgtt *ppgtt =
2215                 container_of(kref, struct i915_hw_ppgtt, ref);
2216
2217         trace_i915_ppgtt_release(&ppgtt->base);
2218
2219         /* vmas should already be unbound */
2220         WARN_ON(!list_empty(&ppgtt->base.active_list));
2221         WARN_ON(!list_empty(&ppgtt->base.inactive_list));
2222
2223         list_del(&ppgtt->base.global_link);
2224         drm_mm_takedown(&ppgtt->base.mm);
2225
2226         ppgtt->base.cleanup(&ppgtt->base);
2227         kfree(ppgtt);
2228 }
2229
2230 extern int intel_iommu_gfx_mapped;
2231 /* Certain Gen5 chipsets require idling the GPU before
2232  * unmapping anything from the GTT when VT-d is enabled.
2233  */
2234 static bool needs_idle_maps(struct drm_device *dev)
2235 {
2236 #ifdef CONFIG_INTEL_IOMMU
2237         /* Query intel_iommu to see if we need the workaround. Presumably that
2238          * was loaded first.
2239          */
2240         if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
2241                 return true;
2242 #endif
2243         return false;
2244 }
2245
2246 static bool do_idling(struct drm_i915_private *dev_priv)
2247 {
2248         bool ret = dev_priv->mm.interruptible;
2249
2250         if (unlikely(dev_priv->gtt.do_idle_maps)) {
2251                 dev_priv->mm.interruptible = false;
2252                 if (i915_gpu_idle(dev_priv->dev)) {
2253                         DRM_ERROR("Couldn't idle GPU\n");
2254                         /* Wait a bit, in hopes it avoids the hang */
2255                         udelay(10);
2256                 }
2257         }
2258
2259         return ret;
2260 }
2261
2262 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
2263 {
2264         if (unlikely(dev_priv->gtt.do_idle_maps))
2265                 dev_priv->mm.interruptible = interruptible;
2266 }
2267
2268 void i915_check_and_clear_faults(struct drm_device *dev)
2269 {
2270         struct drm_i915_private *dev_priv = dev->dev_private;
2271         struct intel_engine_cs *ring;
2272         int i;
2273
2274         if (INTEL_INFO(dev)->gen < 6)
2275                 return;
2276
2277         for_each_ring(ring, dev_priv, i) {
2278                 u32 fault_reg;
2279                 fault_reg = I915_READ(RING_FAULT_REG(ring));
2280                 if (fault_reg & RING_FAULT_VALID) {
2281 #if 0
2282                         DRM_DEBUG_DRIVER("Unexpected fault\n"
2283                                          "\tAddr: 0x%08lx\n"
2284                                          "\tAddress space: %s\n"
2285                                          "\tSource ID: %d\n"
2286                                          "\tType: %d\n",
2287                                          fault_reg & PAGE_MASK,
2288                                          fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2289                                          RING_FAULT_SRCID(fault_reg),
2290                                          RING_FAULT_FAULT_TYPE(fault_reg));
2291 #endif
2292                         I915_WRITE(RING_FAULT_REG(ring),
2293                                    fault_reg & ~RING_FAULT_VALID);
2294                 }
2295         }
2296         POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
2297 }
2298
2299 static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
2300 {
2301         if (INTEL_INFO(dev_priv->dev)->gen < 6) {
2302                 intel_gtt_chipset_flush();
2303         } else {
2304                 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2305                 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2306         }
2307 }
2308
2309 void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
2310 {
2311         struct drm_i915_private *dev_priv = dev->dev_private;
2312
2313         /* Don't bother messing with faults pre GEN6 as we have little
2314          * documentation supporting that it's a good idea.
2315          */
2316         if (INTEL_INFO(dev)->gen < 6)
2317                 return;
2318
2319         i915_check_and_clear_faults(dev);
2320
2321         dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
2322                                        dev_priv->gtt.base.start,
2323                                        dev_priv->gtt.base.total,
2324                                        true);
2325
2326         i915_ggtt_flush(dev_priv);
2327 }
2328
2329 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
2330 {
2331         if (!dma_map_sg(&obj->base.dev->pdev->dev,
2332                         obj->pages->sgl, obj->pages->nents,
2333                         PCI_DMA_BIDIRECTIONAL))
2334                 return -ENOSPC;
2335
2336         return 0;
2337 }
2338
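/* Writes a single 64-bit GGTT PTE; where writeq is unavailable the entry is
 * emitted as two 32-bit writes, low dword first. */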
2339 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2340 {
2341 #ifdef writeq
2342         writeq(pte, addr);
2343 #else
2344         iowrite32((u32)pte, addr);
2345         iowrite32(pte >> 32, addr + 4);
2346 #endif
2347 }
2348
2349 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2350                                      struct sg_table *st,
2351                                      uint64_t start,
2352                                      enum i915_cache_level level, u32 unused)
2353 {
2354         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2355         unsigned first_entry = start >> PAGE_SHIFT;
2356         gen8_pte_t __iomem *gtt_entries =
2357                 (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
2358         int i = 0;
2359         struct sg_page_iter sg_iter;
2360         dma_addr_t addr = 0; /* shut up gcc */
2361         int rpm_atomic_seq;
2362
2363         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2364
2365         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
2366                 addr = sg_dma_address(sg_iter.sg) +
2367                         (sg_iter.sg_pgoffset << PAGE_SHIFT);
2368                 gen8_set_pte(&gtt_entries[i],
2369                              gen8_pte_encode(addr, level, true));
2370                 i++;
2371         }
2372
2373         /*
2374          * XXX: This serves as a posting read to make sure that the PTE has
2375          * actually been updated. There is some concern that even though
2376          * registers and PTEs are within the same BAR, they may be subject to
2377          * NUMA-like access patterns. Therefore, even with the way we assume
2378          * hardware should work, we must keep this posting read for paranoia.
2379          */
2380         if (i != 0)
2381                 WARN_ON(readq(&gtt_entries[i-1])
2382                         != gen8_pte_encode(addr, level, true));
2383
2384         /* This next bit makes the above posting read even more important. We
2385          * want to flush the TLBs only after we're certain all the PTE updates
2386          * have finished.
2387          */
2388         I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2389         POSTING_READ(GFX_FLSH_CNTL_GEN6);
2390
2391         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2392 }
2393
2394 struct insert_entries {
2395         struct i915_address_space *vm;
2396         struct sg_table *st;
2397         uint64_t start;
2398         enum i915_cache_level level;
2399         u32 flags;
2400 };
2401
2402 static int gen8_ggtt_insert_entries__cb(void *_arg)
2403 {
2404         struct insert_entries *arg = _arg;
2405         gen8_ggtt_insert_entries(arg->vm, arg->st,
2406                                  arg->start, arg->level, arg->flags);
2407         return 0;
2408 }
2409
2410 static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2411                                           struct sg_table *st,
2412                                           uint64_t start,
2413                                           enum i915_cache_level level,
2414                                           u32 flags)
2415 {
2416         struct insert_entries arg = { vm, st, start, level, flags };
2417 #ifndef __DragonFly__
2418         stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
2419 #else
2420         /* XXX: is this enough ?
2421          * See Linux commit 5bab6f60cb4d1417ad7c599166bcfec87529c1a2 */
2422         get_mplock();
2423         gen8_ggtt_insert_entries__cb(&arg);
2424         rel_mplock();
2425 #endif
2426 }
2427
2428 /*
2429  * Binds an object into the global gtt with the specified cache level. The object
2430  * will be accessible to the GPU via commands whose operands reference offsets
2431  * within the global GTT as well as accessible by the GPU through the GMADR
2432  * mapped BAR (dev_priv->mm.gtt->gtt).
2433  */
2434 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2435                                      struct sg_table *st,
2436                                      uint64_t start,
2437                                      enum i915_cache_level level, u32 flags)
2438 {
2439         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2440         unsigned first_entry = start >> PAGE_SHIFT;
2441         gen6_pte_t __iomem *gtt_entries =
2442                 (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
2443         int i = 0;
2444         struct sg_page_iter sg_iter;
2445         dma_addr_t addr = 0;
2446         int rpm_atomic_seq;
2447
2448         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2449
2450         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
2451                 addr = sg_page_iter_dma_address(&sg_iter);
2452                 iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
2453                 i++;
2454         }
2455
2456         /* XXX: This serves as a posting read to make sure that the PTE has
2457          * actually been updated. There is some concern that even though
2458          * registers and PTEs are within the same BAR, they may be subject to
2459          * NUMA-like access patterns. Therefore, even with the way we assume
2460          * hardware should work, we must keep this posting read for paranoia.
2461          */
2462         if (i != 0) {
2463                 unsigned long gtt = readl(&gtt_entries[i-1]);
2464                 WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
2465         }
2466
2467         /* This next bit makes the above posting read even more important. We
2468          * want to flush the TLBs only after we're certain all the PTE updates
2469          * have finished.
2470          */
2471         I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2472         POSTING_READ(GFX_FLSH_CNTL_GEN6);
2473
2474         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2475 }
2476
2477 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2478                                   uint64_t start,
2479                                   uint64_t length,
2480                                   bool use_scratch)
2481 {
2482         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2483         unsigned first_entry = start >> PAGE_SHIFT;
2484         unsigned num_entries = length >> PAGE_SHIFT;
2485         gen8_pte_t scratch_pte, __iomem *gtt_base =
2486                 (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
2487         const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
2488         int i;
2489         int rpm_atomic_seq;
2490
2491         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2492
2493         if (WARN(num_entries > max_entries,
2494                  "First entry = %d; Num entries = %d (max=%d)\n",
2495                  first_entry, num_entries, max_entries))
2496                 num_entries = max_entries;
2497
2498         scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
2499                                       I915_CACHE_LLC,
2500                                       use_scratch);
2501         for (i = 0; i < num_entries; i++)
2502                 gen8_set_pte(&gtt_base[i], scratch_pte);
2503         readl(gtt_base);
2504
2505         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2506 }
2507
2508 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2509                                   uint64_t start,
2510                                   uint64_t length,
2511                                   bool use_scratch)
2512 {
2513         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2514         unsigned first_entry = start >> PAGE_SHIFT;
2515         unsigned num_entries = length >> PAGE_SHIFT;
2516         gen6_pte_t scratch_pte, __iomem *gtt_base =
2517                 (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
2518         const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
2519         int i;
2520         int rpm_atomic_seq;
2521
2522         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2523
2524         if (WARN(num_entries > max_entries,
2525                  "First entry = %d; Num entries = %d (max=%d)\n",
2526                  first_entry, num_entries, max_entries))
2527                 num_entries = max_entries;
2528
2529         scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
2530                                      I915_CACHE_LLC, use_scratch, 0);
2531
2532         for (i = 0; i < num_entries; i++)
2533                 iowrite32(scratch_pte, &gtt_base[i]);
2534         readl(gtt_base);
2535
2536         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2537 }
2538
2539 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2540                                      struct sg_table *pages,
2541                                      uint64_t start,
2542                                      enum i915_cache_level cache_level, u32 unused)
2543 {
2544         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2545         unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2546                 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2547         int rpm_atomic_seq;
2548
2549         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2550
2551         intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
2552
2553         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2554
2555 }
2556
2557 static void i915_ggtt_clear_range(struct i915_address_space *vm,
2558                                   uint64_t start,
2559                                   uint64_t length,
2560                                   bool unused)
2561 {
2562         struct drm_i915_private *dev_priv = vm->dev->dev_private;
2563         unsigned first_entry = start >> PAGE_SHIFT;
2564         unsigned num_entries = length >> PAGE_SHIFT;
2565         int rpm_atomic_seq;
2566
2567         rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2568
2569         intel_gtt_clear_range(first_entry, num_entries);
2570
2571         assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2572 }
2573
2574 static int ggtt_bind_vma(struct i915_vma *vma,
2575                          enum i915_cache_level cache_level,
2576                          u32 flags)
2577 {
2578         struct drm_i915_gem_object *obj = vma->obj;
2579         u32 pte_flags = 0;
2580         int ret;
2581
2582         ret = i915_get_ggtt_vma_pages(vma);
2583         if (ret)
2584                 return ret;
2585
2586         /* Currently applicable only to VLV */
2587         if (obj->gt_ro)
2588                 pte_flags |= PTE_READ_ONLY;
2589
2590         vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
2591                                 vma->node.start,
2592                                 cache_level, pte_flags);
2593
2594         /*
2595          * Without aliasing PPGTT there's no difference between
2596          * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2597          * upgrade to both bound if we bind either to avoid double-binding.
2598          */
2599         vma->bound |= GLOBAL_BIND | LOCAL_BIND;
2600
2601         return 0;
2602 }
2603
2604 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2605                                  enum i915_cache_level cache_level,
2606                                  u32 flags)
2607 {
2608         struct drm_device *dev = vma->vm->dev;
2609         struct drm_i915_private *dev_priv = dev->dev_private;
2610         struct drm_i915_gem_object *obj = vma->obj;
2611         struct sg_table *pages = obj->pages;
2612         u32 pte_flags = 0;
2613         int ret;
2614
2615         ret = i915_get_ggtt_vma_pages(vma);
2616         if (ret)
2617                 return ret;
2618         pages = vma->ggtt_view.pages;
2619
2620         /* Currently applicable only to VLV */
2621         if (obj->gt_ro)
2622                 pte_flags |= PTE_READ_ONLY;
2623
2624
2625         if (flags & GLOBAL_BIND) {
2626                 vma->vm->insert_entries(vma->vm, pages,
2627                                         vma->node.start,
2628                                         cache_level, pte_flags);
2629         }
2630
2631         if (flags & LOCAL_BIND) {
2632                 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
2633                 appgtt->base.insert_entries(&appgtt->base, pages,
2634                                             vma->node.start,
2635                                             cache_level, pte_flags);
2636         }
2637
2638         return 0;
2639 }
2640
2641 static void ggtt_unbind_vma(struct i915_vma *vma)
2642 {
2643         struct drm_device *dev = vma->vm->dev;
2644         struct drm_i915_private *dev_priv = dev->dev_private;
2645         struct drm_i915_gem_object *obj = vma->obj;
2646         const uint64_t size = min_t(uint64_t,
2647                                     obj->base.size,
2648                                     vma->node.size);
2649
2650         if (vma->bound & GLOBAL_BIND) {
2651                 vma->vm->clear_range(vma->vm,
2652                                      vma->node.start,
2653                                      size,
2654                                      true);
2655         }
2656
2657         if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
2658                 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
2659
2660                 appgtt->base.clear_range(&appgtt->base,
2661                                          vma->node.start,
2662                                          size,
2663                                          true);
2664         }
2665 }
2666
2667 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
2668 {
2669         struct drm_device *dev = obj->base.dev;
2670         struct drm_i915_private *dev_priv = dev->dev_private;
2671         bool interruptible;
2672
2673         interruptible = do_idling(dev_priv);
2674
2675         dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
2676                      PCI_DMA_BIDIRECTIONAL);
2677
2678         undo_idling(dev_priv, interruptible);
2679 }
2680
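/*
 * Editor's note (inferred from the body below): a node whose "color" (cache
 * attribute) differs from its neighbour must not share a page with it, so a
 * 4096-byte guard is shaved off each end of the hole being considered.
 */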
2681 static void i915_gtt_color_adjust(struct drm_mm_node *node,
2682                                   unsigned long color,
2683                                   u64 *start,
2684                                   u64 *end)
2685 {
2686         if (node->color != color)
2687                 *start += 4096;
2688
2689         if (!list_empty(&node->node_list)) {
2690                 node = list_entry(node->node_list.next,
2691                                   struct drm_mm_node,
2692                                   node_list);
2693                 if (node->allocated && node->color != color)
2694                         *end -= 4096;
2695         }
2696 }
2697
2698 static int i915_gem_setup_global_gtt(struct drm_device *dev,
2699                                      u64 start,
2700                                      u64 mappable_end,
2701                                      u64 end)
2702 {
2703         /* Let GEM manage all of the aperture.
2704          *
2705          * However, leave one page at the end still bound to the scratch page.
2706          * There are a number of places where the hardware apparently prefetches
2707          * past the end of the object, and we've seen multiple hangs with the
2708          * GPU head pointer stuck in a batchbuffer bound at the last page of the
2709          * aperture.  One page should be enough to keep any prefetching inside
2710          * of the aperture.
2711          */
2712         struct drm_i915_private *dev_priv = dev->dev_private;
2713         struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
2714         struct drm_mm_node *entry;
2715         struct drm_i915_gem_object *obj;
2716         unsigned long hole_start, hole_end;
2717         int ret;
2718         unsigned long mappable;
2719         int error;
2720
2721         mappable = min(end, mappable_end) - start;
2722         BUG_ON(mappable_end > end);
2723
2724         ggtt_vm->start = start;
2725
2726         /* Subtract the guard page before address space initialization to
2727          * shrink the range used by drm_mm */
2728         ggtt_vm->total = end - start - PAGE_SIZE;
2729         i915_address_space_init(ggtt_vm, dev_priv);
2730         ggtt_vm->total += PAGE_SIZE;
2731
2732         if (intel_vgpu_active(dev)) {
2733                 ret = intel_vgt_balloon(dev);
2734                 if (ret)
2735                         return ret;
2736         }
2737
2738         if (!HAS_LLC(dev))
2739                 ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
2740
2741         /* Mark any preallocated objects as occupied */
2742         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
2743                 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
2744
2745                 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
2746                               i915_gem_obj_ggtt_offset(obj), obj->base.size);
2747
2748                 WARN_ON(i915_gem_obj_ggtt_bound(obj));
2749                 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
2750                 if (ret) {
2751                         DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
2752                         return ret;
2753                 }
2754                 vma->bound |= GLOBAL_BIND;
2755                 __i915_vma_set_map_and_fenceable(vma);
2756                 list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
2757         }
2758
2759         /* Clear any non-preallocated blocks */
2760         drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
2761                 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2762                               hole_start, hole_end);
2763                 ggtt_vm->clear_range(ggtt_vm, hole_start,
2764                                      hole_end - hole_start, true);
2765         }
2766
2767 #ifdef __DragonFly__
2768         device_printf(dev->dev->bsddev,
2769             "taking over the fictitious range 0x%lx-0x%lx\n",
2770             dev_priv->gtt.mappable_base + start, dev_priv->gtt.mappable_base + start + mappable);
2771         error = -vm_phys_fictitious_reg_range(dev_priv->gtt.mappable_base + start,
2772             dev_priv->gtt.mappable_base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
2773 #endif
2774
2775         /* And finally clear the reserved guard page */
2776         ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
2777
2778         if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
2779                 struct i915_hw_ppgtt *ppgtt;
2780
2781                 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2782                 if (!ppgtt)
2783                         return -ENOMEM;
2784
2785                 ret = __hw_ppgtt_init(dev, ppgtt);
2786                 if (ret) {
2787                         ppgtt->base.cleanup(&ppgtt->base);
2788                         kfree(ppgtt);
2789                         return ret;
2790                 }
2791
2792                 if (ppgtt->base.allocate_va_range)
2793                         ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
2794                                                             ppgtt->base.total);
2795                 if (ret) {
2796                         ppgtt->base.cleanup(&ppgtt->base);
2797                         kfree(ppgtt);
2798                         return ret;
2799                 }
2800
2801                 ppgtt->base.clear_range(&ppgtt->base,
2802                                         ppgtt->base.start,
2803                                         ppgtt->base.total,
2804                                         true);
2805
2806                 dev_priv->mm.aliasing_ppgtt = ppgtt;
2807                 WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
2808                 dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
2809         }
2810
2811         return 0;
2812 }
2813
2814 void i915_gem_init_global_gtt(struct drm_device *dev)
2815 {
2816         struct drm_i915_private *dev_priv = dev->dev_private;
2817         u64 gtt_size, mappable_size;
2818
2819         gtt_size = dev_priv->gtt.base.total;
2820         mappable_size = dev_priv->gtt.mappable_end;
2821
2822         i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
2823 }
2824
2825 void i915_global_gtt_cleanup(struct drm_device *dev)
2826 {
2827         struct drm_i915_private *dev_priv = dev->dev_private;
2828         struct i915_address_space *vm = &dev_priv->gtt.base;
2829
2830         if (dev_priv->mm.aliasing_ppgtt) {
2831                 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2832
2833                 ppgtt->base.cleanup(&ppgtt->base);
2834         }
2835
2836         if (drm_mm_initialized(&vm->mm)) {
2837                 if (intel_vgpu_active(dev))
2838                         intel_vgt_deballoon();
2839
2840                 drm_mm_takedown(&vm->mm);
2841                 list_del(&vm->global_link);
2842         }
2843
2844         vm->cleanup(vm);
2845 }
2846
2847 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2848 {
2849         snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2850         snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2851         return snb_gmch_ctl << 20;
2852 }
2853
2854 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2855 {
2856         bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2857         bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2858         if (bdw_gmch_ctl)
2859                 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2860
2861 #ifdef CONFIG_X86_32
2862         /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2863         if (bdw_gmch_ctl > 4)
2864                 bdw_gmch_ctl = 4;
2865 #endif
2866
2867         return bdw_gmch_ctl << 20;
2868 }
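/*
 * Illustrative decode for gen8_get_total_gtt_size() above (editor-added):
 * a GGMS field of 3 yields 1 << 3 == 8, i.e. an 8MB GTT. Assuming 8-byte
 * gen8 PTEs, that is 1M entries, or 4GB of mappable GGTT address space.
 */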
2869
2870 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2871 {
2872         gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2873         gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2874
2875         if (gmch_ctrl)
2876                 return 1 << (20 + gmch_ctrl);
2877
2878         return 0;
2879 }
2880
2881 static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
2882 {
2883         snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2884         snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
2885         return snb_gmch_ctl << 25; /* 32 MB units */
2886 }
2887
2888 static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
2889 {
2890         bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2891         bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
2892         return bdw_gmch_ctl << 25; /* 32 MB units */
2893 }
2894
2895 static size_t chv_get_stolen_size(u16 gmch_ctrl)
2896 {
2897         gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2898         gmch_ctrl &= SNB_GMCH_GMS_MASK;
2899
2900         /*
2901          * 0x0  to 0x10: 32MB increments starting at 0MB
2902          * 0x11 to 0x16: 4MB increments starting at 8MB
2903          * 0x17 to 0x1d: 4MB increments starting at 36MB
2904          */
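	/*
	 * Editor's worked examples for the decode below:
	 *   0x10 -> 0x10 << 25              == 512MB (32MB steps)
	 *   0x11 -> (0x11 - 0x11 + 2) << 22 ==   8MB
	 *   0x17 -> (0x17 - 0x17 + 9) << 22 ==  36MB
	 */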
2905         if (gmch_ctrl < 0x11)
2906                 return gmch_ctrl << 25;
2907         else if (gmch_ctrl < 0x17)
2908                 return (gmch_ctrl - 0x11 + 2) << 22;
2909         else
2910                 return (gmch_ctrl - 0x17 + 9) << 22;
2911 }
2912
2913 static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2914 {
2915         gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2916         gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2917
2918         if (gen9_gmch_ctl < 0xf0)
2919                 return gen9_gmch_ctl << 25; /* 32 MB units */
2920         else
2921                 /* values of 0xf0 and above: 4MB increments starting at 4MB */
2922                 return (gen9_gmch_ctl - 0xf0 + 1) << 22;
2923 }
2924
2925 static int ggtt_probe_common(struct drm_device *dev,
2926                              size_t gtt_size)
2927 {
2928         struct drm_i915_private *dev_priv = dev->dev_private;
2929         struct i915_page_scratch *scratch_page;
2930         phys_addr_t gtt_phys_addr;
2931
2932         /* For Modern GENs the PTEs and register space are split in the BAR */
2933         gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
2934                 (pci_resource_len(dev->pdev, 0) / 2);
2935
2936         /*
2937          * On BXT writes larger than 64 bit to the GTT pagetable range will be
2938          * dropped. For WC mappings in general we have 64 byte burst writes
2939          * when the WC buffer is flushed, so we can't use it, but have to
2940          * resort to an uncached mapping. The WC issue is easily caught by the
2941          * readback check when writing GTT PTE entries.
2942          */
2943         if (IS_BROXTON(dev))
2944                 dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
2945         else
2946                 dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
2947         if (!dev_priv->gtt.gsm) {
2948                 DRM_ERROR("Failed to map the gtt page table\n");
2949                 return -ENOMEM;
2950         }
2951
2952         scratch_page = alloc_scratch_page(dev);
2953         if (IS_ERR(scratch_page)) {
2954                 DRM_ERROR("Scratch setup failed\n");
2955                 /* iounmap will also get called at remove, but meh */
2956                 iounmap(dev_priv->gtt.gsm);
2957                 return PTR_ERR(scratch_page);
2958         }
2959
2960         dev_priv->gtt.base.scratch_page = scratch_page;
2961
2962         return 0;
2963 }
2964
2965 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2966  * bits. When using advanced contexts each context stores its own PAT, but
2967  * writing this data shouldn't be harmful even in those cases. */
2968 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
2969 {
2970         uint64_t pat;
2971
2972         pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
2973               GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2974               GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2975               GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
2976               GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2977               GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2978               GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2979               GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2980
2981         if (!USES_PPGTT(dev_priv->dev))
2982                 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2983                  * so RTL will always use the value corresponding to
2984                  * pat_sel = 000".
2985                  * So let's disable caching for the GGTT to avoid screen corruption.
2986                  * MOCS can still be used, though.
2987                  * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
2988                  * before this patch, i.e. the same uncached + snooping access
2989                  * as on gen6/7 seems to be in effect.
2990                  * - So this just fixes blitter/render access. Again it looks
2991                  * like it's not just uncached access, but uncached + snooping.
2992                  * So we can still hold onto all our assumptions wrt cpu
2993                  * clflushing on LLC machines.
2994                  */
2995                 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2996
2997         /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2998          * write would work. */
2999         I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
3000         I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
3001 }
3002
3003 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
3004 {
3005         uint64_t pat;
3006
3007         /*
3008          * Map WB on BDW to snooped on CHV.
3009          *
3010          * Only the snoop bit has meaning for CHV, the rest is
3011          * ignored.
3012          *
3013          * The hardware will never snoop for certain types of accesses:
3014          * - CPU GTT (GMADR->GGTT->no snoop->memory)
3015          * - PPGTT page tables
3016          * - some other special cycles
3017          *
3018          * As with BDW, we also need to consider the following for GT accesses:
3019          * "For GGTT, there is NO pat_sel[2:0] from the entry,
3020          * so RTL will always use the value corresponding to
3021          * pat_sel = 000".
3022          * Which means we must set the snoop bit in PAT entry 0
3023          * in order to keep the global status page working.
3024          */
3025         pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
3026               GEN8_PPAT(1, 0) |
3027               GEN8_PPAT(2, 0) |
3028               GEN8_PPAT(3, 0) |
3029               GEN8_PPAT(4, CHV_PPAT_SNOOP) |
3030               GEN8_PPAT(5, CHV_PPAT_SNOOP) |
3031               GEN8_PPAT(6, CHV_PPAT_SNOOP) |
3032               GEN8_PPAT(7, CHV_PPAT_SNOOP);
3033
3034         I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
3035         I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
3036 }
3037
3038 static int gen8_gmch_probe(struct drm_device *dev,
3039                            u64 *gtt_total,
3040                            size_t *stolen,
3041                            phys_addr_t *mappable_base,
3042                            u64 *mappable_end)
3043 {
3044         struct drm_i915_private *dev_priv = dev->dev_private;
3045         u64 gtt_size;
3046         u16 snb_gmch_ctl;
3047         int ret;
3048
3049         /* TODO: We're not aware of mappable constraints on gen8 yet */
3050         *mappable_base = pci_resource_start(dev->pdev, 2);
3051         *mappable_end = pci_resource_len(dev->pdev, 2);
3052
3053 #if 0
3054         if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
3055                 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
3056 #endif
3057
3058         pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3059
3060         if (INTEL_INFO(dev)->gen >= 9) {
3061                 *stolen = gen9_get_stolen_size(snb_gmch_ctl);
3062                 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
3063         } else if (IS_CHERRYVIEW(dev)) {
3064                 *stolen = chv_get_stolen_size(snb_gmch_ctl);
3065                 gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
3066         } else {
3067                 *stolen = gen8_get_stolen_size(snb_gmch_ctl);
3068                 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
3069         }
3070
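        /*
         * Each 8-byte gen8 PTE maps one 4 KiB page, so e.g. an 8 MB GTT
         * translates into 4 GB of GGTT address space.
         */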
3071         *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
3072
3073         if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
3074                 chv_setup_private_ppat(dev_priv);
3075         else
3076                 bdw_setup_private_ppat(dev_priv);
3077
3078         ret = ggtt_probe_common(dev, gtt_size);
3079
3080         dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
3081         dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
3082         dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
3083         dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
3084
3085         if (IS_CHERRYVIEW(dev_priv))
3086                 dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
3087
3088         return ret;
3089 }
3090
3091 static int gen6_gmch_probe(struct drm_device *dev,
3092                            u64 *gtt_total,
3093                            size_t *stolen,
3094                            phys_addr_t *mappable_base,
3095                            u64 *mappable_end)
3096 {
3097         struct drm_i915_private *dev_priv = dev->dev_private;
3098         unsigned int gtt_size;
3099         u16 snb_gmch_ctl;
3100         int ret;
3101
3102         *mappable_base = pci_resource_start(dev->pdev, 2);
3103         *mappable_end = pci_resource_len(dev->pdev, 2);
3104
3105         /* 64/512MB is the current min/max we actually know of, but this is just
3106          * a coarse sanity check.
3107          */
3108         if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
3109                 DRM_ERROR("Unknown GMADR size (%lx)\n",
3110                           dev_priv->gtt.mappable_end);
3111                 return -ENXIO;
3112         }
3113
3114 #if 0
3115         if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
3116                 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
3117 #endif
3118         pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3119
3120         *stolen = gen6_get_stolen_size(snb_gmch_ctl);
3121
3122         gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
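        /* gen6 PTEs are 4 bytes, so e.g. a 2 MB GTT maps 2 GB of address space. */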
3123         *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
3124
3125         ret = ggtt_probe_common(dev, gtt_size);
3126
3127         dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
3128         dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
3129         dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
3130         dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
3131
3132         return ret;
3133 }
3134
3135 static void gen6_gmch_remove(struct i915_address_space *vm)
3136 {
3138         struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
3139
3140         iounmap(gtt->gsm);
3141         free_scratch_page(vm->dev, vm->scratch_page);
3142 }
3143
3144 static int i915_gmch_probe(struct drm_device *dev,
3145                            u64 *gtt_total,
3146                            size_t *stolen,
3147                            phys_addr_t *mappable_base,
3148                            u64 *mappable_end)
3149 {
3150         struct drm_i915_private *dev_priv = dev->dev_private;
3151 #if 0
3152         int ret;
3153
3154         ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
3155         if (!ret) {
3156                 DRM_ERROR("failed to set up gmch\n");
3157                 return -EIO;
3158         }
3159 #endif
3160
3161         intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
3162
3163         dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
3164         dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
3165         dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
3166         dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
3167         dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
3168
3169         if (unlikely(dev_priv->gtt.do_idle_maps))
3170                 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3171
3172         return 0;
3173 }
3174
3175 static void i915_gmch_remove(struct i915_address_space *vm)
3176 {
3177         intel_gmch_remove();
3178 }
3179
3180 int i915_gem_gtt_init(struct drm_device *dev)
3181 {
3182         struct drm_i915_private *dev_priv = dev->dev_private;
3183         struct i915_gtt *gtt = &dev_priv->gtt;
3184         int ret;
3185
3186         if (INTEL_INFO(dev)->gen <= 5) {
3187                 gtt->gtt_probe = i915_gmch_probe;
3188                 gtt->base.cleanup = i915_gmch_remove;
3189         } else if (INTEL_INFO(dev)->gen < 8) {
3190                 gtt->gtt_probe = gen6_gmch_probe;
3191                 gtt->base.cleanup = gen6_gmch_remove;
3192                 if (IS_HASWELL(dev) && dev_priv->ellc_size)
3193                         gtt->base.pte_encode = iris_pte_encode;
3194                 else if (IS_HASWELL(dev))
3195                         gtt->base.pte_encode = hsw_pte_encode;
3196                 else if (IS_VALLEYVIEW(dev))
3197                         gtt->base.pte_encode = byt_pte_encode;
3198                 else if (INTEL_INFO(dev)->gen >= 7)
3199                         gtt->base.pte_encode = ivb_pte_encode;
3200                 else
3201                         gtt->base.pte_encode = snb_pte_encode;
3202         } else {
3203                 dev_priv->gtt.gtt_probe = gen8_gmch_probe;
3204                 dev_priv->gtt.base.cleanup = gen6_gmch_remove;
3205         }
3206
3207         gtt->base.dev = dev;
3208
3209         ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
3210                              &gtt->mappable_base, &gtt->mappable_end);
3211         if (ret)
3212                 return ret;
3213
3214         /* GMADR is the PCI mmio aperture into the global GTT. */
3215         DRM_INFO("Memory usable by graphics device = %luM\n",
3216                  gtt->base.total >> 20);
3217         DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
3218         DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
3219 #ifdef CONFIG_INTEL_IOMMU
3220         if (intel_iommu_gfx_mapped)
3221                 DRM_INFO("VT-d active for gfx access\n");
3222 #endif
3223         /*
3224          * i915.enable_ppgtt is read-only, so do an early pass to validate the
3225          * user's requested state against the hardware/driver capabilities.  We
3226          * do this now so that we can print out any log messages once rather
3227          * than every time we check intel_enable_ppgtt().
3228          */
3229         i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
3230         DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
3231
3232         return 0;
3233 }
3234
3235 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3236 {
3237         struct drm_i915_private *dev_priv = dev->dev_private;
3238         struct drm_i915_gem_object *obj;
3239         struct i915_address_space *vm;
3240         struct i915_vma *vma;
3241         bool flush;
3242
3243         i915_check_and_clear_faults(dev);
3244
3245         /* First fill our portion of the GTT with scratch pages */
3246         dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
3247                                        dev_priv->gtt.base.start,
3248                                        dev_priv->gtt.base.total,
3249                                        true);
3250
3251         /* Cache flush objects bound into GGTT and rebind them. */
3252         vm = &dev_priv->gtt.base;
3253         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
3254                 flush = false;
3255                 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3256                         if (vma->vm != vm)
3257                                 continue;
3258
3259                         WARN_ON(i915_vma_bind(vma, obj->cache_level,
3260                                               PIN_UPDATE));
3261
3262                         flush = true;
3263                 }
3264
3265                 if (flush)
3266                         i915_gem_clflush_object(obj, obj->pin_display);
3267         }
3268
3269         if (INTEL_INFO(dev)->gen >= 8) {
3270                 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
3271                         chv_setup_private_ppat(dev_priv);
3272                 else
3273                         bdw_setup_private_ppat(dev_priv);
3274
3275                 return;
3276         }
3277
3278         if (USES_PPGTT(dev)) {
3279                 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3280                         /* TODO: Perhaps it shouldn't be gen6 specific */
3281
3282                         struct i915_hw_ppgtt *ppgtt =
3283                                         container_of(vm, struct i915_hw_ppgtt,
3284                                                      base);
3285
3286                         if (i915_is_ggtt(vm))
3287                                 ppgtt = dev_priv->mm.aliasing_ppgtt;
3288
3289                         gen6_write_page_range(dev_priv, &ppgtt->pd,
3290                                               0, ppgtt->base.total);
3291                 }
3292         }
3293
3294         i915_ggtt_flush(dev_priv);
3295 }
3296
3297 static struct i915_vma *
3298 __i915_gem_vma_create(struct drm_i915_gem_object *obj,
3299                       struct i915_address_space *vm,
3300                       const struct i915_ggtt_view *ggtt_view)
3301 {
3302         struct i915_vma *vma;
3303
3304         if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3305                 return ERR_PTR(-EINVAL);
3306
3307         vma = kzalloc(sizeof(*vma), GFP_KERNEL);
3308         if (vma == NULL)
3309                 return ERR_PTR(-ENOMEM);
3310
3311         INIT_LIST_HEAD(&vma->vma_link);
3312         INIT_LIST_HEAD(&vma->mm_list);
3313         INIT_LIST_HEAD(&vma->exec_list);
3314         vma->vm = vm;
3315         vma->obj = obj;
3316
3317         if (i915_is_ggtt(vm))
3318                 vma->ggtt_view = *ggtt_view;
3319
3320         list_add_tail(&vma->vma_link, &obj->vma_list);
3321         if (!i915_is_ggtt(vm))
3322                 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
3323
3324         return vma;
3325 }
3326
3327 struct i915_vma *
3328 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
3329                                   struct i915_address_space *vm)
3330 {
3331         struct i915_vma *vma;
3332
3333         vma = i915_gem_obj_to_vma(obj, vm);
3334         if (!vma)
3335                 vma = __i915_gem_vma_create(obj, vm,
3336                                             i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);
3337
3338         return vma;
3339 }
3340
3341 struct i915_vma *
3342 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
3343                                        const struct i915_ggtt_view *view)
3344 {
3345         struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
3346         struct i915_vma *vma;
3347
3348         if (WARN_ON(!view))
3349                 return ERR_PTR(-EINVAL);
3350
3351         vma = i915_gem_obj_to_ggtt_view(obj, view);
3352
3353         if (IS_ERR(vma))
3354                 return vma;
3355
3356         if (!vma)
3357                 vma = __i915_gem_vma_create(obj, ggtt, view);
3358
3359         return vma;
3360
3361 }
3362
3363 static struct scatterlist *
3364 rotate_pages(dma_addr_t *in, unsigned int offset,
3365              unsigned int width, unsigned int height,
3366              struct sg_table *st, struct scatterlist *sg)
3367 {
3368         unsigned int column, row;
3369         unsigned int src_idx;
3370
3371         if (!sg) {
3372                 st->nents = 0;
3373                 sg = st->sgl;
3374         }
3375
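        /*
         * Walk the source column by column, bottom row first, so that e.g. a
         * 2x2 source with pages 1, 2 on the top row and 3, 4 on the bottom row
         * is emitted in the order 3, 1, 4, 2, i.e. rotated by 90 degrees.
         */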
3376         for (column = 0; column < width; column++) {
3377                 src_idx = width * (height - 1) + column;
3378                 for (row = 0; row < height; row++) {
3379                         st->nents++;
3380                         /* We don't need the pages, but need to initialize
3381                          * the entries so the sg list can be happily traversed.
3382                          * The only things we need are the DMA addresses.
3383                          */
3384                         sg_set_page(sg, NULL, PAGE_SIZE, 0);
3385                         sg_dma_address(sg) = in[offset + src_idx];
3386                         sg_dma_len(sg) = PAGE_SIZE;
3387                         sg = sg_next(sg);
3388                         src_idx -= width;
3389                 }
3390         }
3391
3392         return sg;
3393 }
3394
3395 static struct sg_table *
3396 intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
3397                           struct drm_i915_gem_object *obj)
3398 {
3399         struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
3400         unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
3401         unsigned int size_pages_uv;
3402         struct sg_page_iter sg_iter;
3403         unsigned long i;
3404         dma_addr_t *page_addr_list;
3405         struct sg_table *st;
3406         unsigned int uv_start_page;
3407         struct scatterlist *sg;
3408         int ret = -ENOMEM;
3409
3410         /* Allocate a temporary list of source pages for random access. */
3411         page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
3412                                        sizeof(dma_addr_t));
3413         if (!page_addr_list)
3414                 return ERR_PTR(ret);
3415
3416         /* Account for UV plane with NV12. */
3417         if (rot_info->pixel_format == DRM_FORMAT_NV12)
3418                 size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
3419         else
3420                 size_pages_uv = 0;
3421
3422         /* Allocate target SG list. */
3423         st = kmalloc(sizeof(*st), M_DRM, M_WAITOK);
3424         if (!st)
3425                 goto err_st_alloc;
3426
3427         ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
3428         if (ret)
3429                 goto err_sg_alloc;
3430
3431         /* Populate source page list from the object. */
3432         i = 0;
3433         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
3434                 page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
3435                 i++;
3436         }
3437
3438         /* Rotate the pages. */
3439         sg = rotate_pages(page_addr_list, 0,
3440                      rot_info->width_pages, rot_info->height_pages,
3441                      st, NULL);
3442
3443         /* Append the UV plane if NV12. */
3444         if (rot_info->pixel_format == DRM_FORMAT_NV12) {
3445                 uv_start_page = size_pages;
3446
3447                 /* Check for tile-row misalignment. */
3448                 if (offset_in_page(rot_info->uv_offset))
3449                         uv_start_page--;
3450
3451                 rot_info->uv_start_page = uv_start_page;
3452
3453                 rotate_pages(page_addr_list, uv_start_page,
3454                              rot_info->width_pages_uv,
3455                              rot_info->height_pages_uv,
3456                              st, sg);
3457         }
3458
3459         DRM_DEBUG_KMS(
3460                       "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
3461                       obj->base.size, rot_info->pitch, rot_info->height,
3462                       rot_info->pixel_format, rot_info->width_pages,
3463                       rot_info->height_pages, size_pages + size_pages_uv,
3464                       size_pages);
3465
3466         drm_free_large(page_addr_list);
3467
3468         return st;
3469
3470 err_sg_alloc:
3471         kfree(st);
3472 err_st_alloc:
3473         drm_free_large(page_addr_list);
3474
3475         DRM_DEBUG_KMS(
3476                       "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
3477                       obj->base.size, ret, rot_info->pitch, rot_info->height,
3478                       rot_info->pixel_format, rot_info->width_pages,
3479                       rot_info->height_pages, size_pages + size_pages_uv,
3480                       size_pages);
3481         return ERR_PTR(ret);
3482 }
3483
3484 static struct sg_table *
3485 intel_partial_pages(const struct i915_ggtt_view *view,
3486                     struct drm_i915_gem_object *obj)
3487 {
3488         struct sg_table *st;
3489         struct scatterlist *sg;
3490         struct sg_page_iter obj_sg_iter;
3491         int ret = -ENOMEM;
3492
3493         st = kmalloc(sizeof(*st), M_DRM, M_WAITOK);
3494         if (!st)
3495                 goto err_st_alloc;
3496
3497         ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
3498         if (ret)
3499                 goto err_sg_alloc;
3500
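        /*
         * view->params.partial.offset and .size are both in pages: copy
         * .size DMA addresses from the object, starting at .offset.
         */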
3501         sg = st->sgl;
3502         st->nents = 0;
3503         for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
3504                 view->params.partial.offset)
3505         {
3506                 if (st->nents >= view->params.partial.size)
3507                         break;
3508
3509                 sg_set_page(sg, NULL, PAGE_SIZE, 0);
3510                 sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
3511                 sg_dma_len(sg) = PAGE_SIZE;
3512
3513                 sg = sg_next(sg);
3514                 st->nents++;
3515         }
3516
3517         return st;
3518
3519 err_sg_alloc:
3520         kfree(st);
3521 err_st_alloc:
3522         return ERR_PTR(ret);
3523 }
3524
3525 static int
3526 i915_get_ggtt_vma_pages(struct i915_vma *vma)
3527 {
3528         int ret = 0;
3529
3530         if (vma->ggtt_view.pages)
3531                 return 0;
3532
3533         if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
3534                 vma->ggtt_view.pages = vma->obj->pages;
3535         else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
3536                 vma->ggtt_view.pages =
3537                         intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
3538         else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
3539                 vma->ggtt_view.pages =
3540                         intel_partial_pages(&vma->ggtt_view, vma->obj);
3541         else
3542                 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3543                           vma->ggtt_view.type);
3544
3545         if (!vma->ggtt_view.pages) {
3546                 DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
3547                           vma->ggtt_view.type);
3548                 ret = -EINVAL;
3549         } else if (IS_ERR(vma->ggtt_view.pages)) {
3550                 ret = PTR_ERR(vma->ggtt_view.pages);
3551                 vma->ggtt_view.pages = NULL;
3552                 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3553                           vma->ggtt_view.type, ret);
3554         }
3555
3556         return ret;
3557 }
3558
3559 /**
3560  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
3561  * @vma: VMA to map
3562  * @cache_level: mapping cache level
3563  * @flags: flags like global or local mapping
3564  *
3565  * DMA addresses are taken from the scatter-gather table of this object (or of
3566  * this VMA in the case of non-default GGTT views) and the PTEs are set up.
3567  * Note that the DMA addresses are the only part of the SG table we care about.
3568  */
3569 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
3570                   u32 flags)
3571 {
3572         int ret;
3573         u32 bind_flags;
3574
3575         if (WARN_ON(flags == 0))
3576                 return -EINVAL;
3577
3578         bind_flags = 0;
3579         if (flags & PIN_GLOBAL)
3580                 bind_flags |= GLOBAL_BIND;
3581         if (flags & PIN_USER)
3582                 bind_flags |= LOCAL_BIND;
3583
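        /*
         * With PIN_UPDATE the caller wants already-bound address spaces
         * rewritten as well; otherwise only bind into the address spaces the
         * VMA is not yet bound to.
         */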
3584         if (flags & PIN_UPDATE)
3585                 bind_flags |= vma->bound;
3586         else
3587                 bind_flags &= ~vma->bound;
3588
3589         if (bind_flags == 0)
3590                 return 0;
3591
3592         if (vma->bound == 0 && vma->vm->allocate_va_range) {
3593                 trace_i915_va_alloc(vma->vm,
3594                                     vma->node.start,
3595                                     vma->node.size,
3596                                     VM_TO_TRACE_NAME(vma->vm));
3597
3598                 /* XXX: i915_vma_pin() will fix this +- hack */
3599                 vma->pin_count++;
3600                 ret = vma->vm->allocate_va_range(vma->vm,
3601                                                  vma->node.start,
3602                                                  vma->node.size);
3603                 vma->pin_count--;
3604                 if (ret)
3605                         return ret;
3606         }
3607
3608         ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
3609         if (ret)
3610                 return ret;
3611
3612         vma->bound |= bind_flags;
3613
3614         return 0;
3615 }
3616
3617 /**
3618  * i915_ggtt_view_size - Get the size of a GGTT view.
3619  * @obj: Object the view is of.
3620  * @view: The view in question.
3621  *
3622  * Return: The size of the GGTT view in bytes.
3623  */
3624 size_t
3625 i915_ggtt_view_size(struct drm_i915_gem_object *obj,
3626                     const struct i915_ggtt_view *view)
3627 {
3628         if (view->type == I915_GGTT_VIEW_NORMAL) {
3629                 return obj->base.size;
3630         } else if (view->type == I915_GGTT_VIEW_ROTATED) {
3631                 return view->params.rotation_info.size;
3632         } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
3633                 return view->params.partial.size << PAGE_SHIFT;
3634         } else {
3635                 WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
3636                 return obj->base.size;
3637         }
3638 }