/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * $FreeBSD: src/sys/dev/drm2/i915/i915_gem_gtt.c,v 1.1 2012/05/22 11:07:44 kib Exp $
 */

#include <sys/sfbuf.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"

typedef uint32_t gtt_pte_t;

#define GEN6_GTT_ADDR_ENCODE(addr)      ((addr) | (((addr) >> 28) & 0xff0))
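/*
 * GEN6_GTT_ADDR_ENCODE() above folds the high physical address bits into the
 * low flag area of a GTT/PTE entry: bits 31:12 of a page-aligned address stay
 * in place, while address bits 39:32 land in entry bits 11:4 via the shift by
 * 28 and the 0xff0 mask.  For example, addr = 0x100000000 (bit 32 set) gives
 * ((addr >> 28) & 0xff0) == 0x010, i.e. bit 4 of the encoded entry.
 */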

#define GEN6_PDE_VALID                  (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)      GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID                  (1 << 0)
#define GEN6_PTE_UNCACHED               (1 << 1)
#define HSW_PTE_UNCACHED                (0)
#define GEN6_PTE_CACHE_LLC              (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC          (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)      GEN6_GTT_ADDR_ENCODE(addr)

static inline gtt_pte_t pte_encode(struct drm_device *dev,
                                   dma_addr_t addr,
                                   enum i915_cache_level level)
{
        gtt_pte_t pte = GEN6_PTE_VALID;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);

        switch (level) {
        case I915_CACHE_LLC_MLC:
                /* Haswell doesn't set L3 this way */
                if (IS_HASWELL(dev))
                        pte |= GEN6_PTE_CACHE_LLC;
                else
                        pte |= GEN6_PTE_CACHE_LLC_MLC;
                break;
        case I915_CACHE_LLC:
                pte |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                if (IS_HASWELL(dev))
                        pte |= HSW_PTE_UNCACHED;
                else
                        pte |= GEN6_PTE_UNCACHED;
                break;
        }

        return pte;
}
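
/*
 * pte_encode() is used by the global GTT paths below; e.g.
 * pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC) in
 * i915_ggtt_clear_range() yields GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC plus
 * the encoded scratch page address.
 */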

/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
                                   unsigned first_entry,
                                   unsigned num_entries)
{
        gtt_pte_t *pt_vaddr;
        gtt_pte_t scratch_pte;
        struct sf_buf *sf;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;

        scratch_pte = GEN6_GTT_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
        scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
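        /*
         * Cleared ranges are pointed at the single scratch page rather than
         * left with stale translations, so a stray GPU access to an unbound
         * range lands on a harmless page.
         */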

        while (num_entries) {
                last_pte = first_pte + num_entries;
                if (last_pte > I915_PPGTT_PT_ENTRIES)
                        last_pte = I915_PPGTT_PT_ENTRIES;

                sf = sf_buf_alloc(ppgtt->pt_pages[act_pd]);
                pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);

                for (i = first_pte; i < last_pte; i++)
                        pt_vaddr[i] = scratch_pte;

                sf_buf_free(sf);

                num_entries -= last_pte - first_pte;
                first_pte = 0;
                act_pd++;
        }
}

int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt;
        u_int first_pd_entry_in_global_pt, i;

        /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
         * entries. For aliasing ppgtt support we just steal them at the end for
         * now. */
        first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
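        /*
         * Worked example, assuming the usual gen6 sizes of
         * I915_PPGTT_PD_ENTRIES = 512 and I915_PPGTT_PT_ENTRIES = 1024:
         * 512 PDEs * 1024 PTEs * 4 KB pages = 2 GB of PPGTT address space,
         * and the stolen PDEs occupy the top 512 slots (2 MB of mapping)
         * of the 512*1024-entry global GTT.
         */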

        ppgtt = kmalloc(sizeof(*ppgtt), DRM_I915_GEM, M_WAITOK | M_ZERO);

        ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
        ppgtt->pt_pages = kmalloc(sizeof(vm_page_t) * ppgtt->num_pd_entries,
            DRM_I915_GEM, M_WAITOK | M_ZERO);

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = vm_page_alloc(NULL, 0,
                    VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
                if (ppgtt->pt_pages[i] == NULL) {
                        dev_priv->mm.aliasing_ppgtt = ppgtt;
                        i915_gem_cleanup_aliasing_ppgtt(dev);
                        return (-ENOMEM);
                }
        }

        ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

        i915_ppgtt_clear_range(ppgtt, 0, ppgtt->num_pd_entries *
            I915_PPGTT_PT_ENTRIES);
        ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(uint32_t);
        dev_priv->mm.aliasing_ppgtt = ppgtt;

        return (0);
}

void
i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv;
        struct i915_hw_ppgtt *ppgtt;
        vm_page_t m;
        int i;

        dev_priv = dev->dev_private;
        ppgtt = dev_priv->mm.aliasing_ppgtt;
        if (ppgtt == NULL)
                return;
        dev_priv->mm.aliasing_ppgtt = NULL;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                m = ppgtt->pt_pages[i];
                if (m != NULL) {
                        vm_page_busy_wait(m, FALSE, "i915gem");
                        vm_page_unwire(m, 0);
                        vm_page_free(m);
                }
        }

        drm_free(ppgtt->pt_pages, DRM_I915_GEM);
        drm_free(ppgtt, DRM_I915_GEM);
}

static void
i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
    unsigned num_entries, vm_page_t *pages, uint32_t pte_flags)
{
        uint32_t *pt_vaddr, pte;
        struct sf_buf *sf;
        unsigned act_pd, first_pte;
        unsigned last_pte, i;
        vm_paddr_t page_addr;

        act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
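        /*
         * first_entry is a linear PTE index into the PPGTT; act_pd selects
         * the page-table page and first_pte the slot within that page, so
         * the loop below walks the page tables one sf_buf mapping at a time.
         */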

        while (num_entries) {
                last_pte = first_pte + num_entries;
                if (last_pte > I915_PPGTT_PT_ENTRIES)
                        last_pte = I915_PPGTT_PT_ENTRIES;

                sf = sf_buf_alloc(ppgtt->pt_pages[act_pd]);
                pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);

                for (i = first_pte; i < last_pte; i++) {
                        page_addr = VM_PAGE_TO_PHYS(*pages);
                        pte = GEN6_PTE_ADDR_ENCODE(page_addr);
                        pt_vaddr[i] = pte | pte_flags;

                        pages++;
                }

                sf_buf_free(sf);

                num_entries -= last_pte - first_pte;
                first_pte = 0;
                act_pd++;
        }
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
{
        struct drm_device *dev;
        struct drm_i915_private *dev_priv;
        uint32_t pte_flags;

        dev = obj->base.dev;
        dev_priv = dev->dev_private;
        pte_flags = GEN6_PTE_VALID;

        switch (cache_level) {
        case I915_CACHE_LLC_MLC:
                pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
                break;
        case I915_CACHE_LLC:
                pte_flags |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                pte_flags |= GEN6_PTE_UNCACHED;
                break;
        default:
                panic("cache mode");
        }
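
        /*
         * The aliasing PPGTT mirrors the global GTT layout: the object is
         * inserted at the same page offset as its global GTT node
         * (gtt_space->start), so a GPU virtual address resolves to the same
         * pages through either translation.
         */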
        i915_ppgtt_insert_pages(ppgtt, obj->gtt_space->start >> PAGE_SHIFT,
            obj->base.size >> PAGE_SHIFT, obj->pages, pte_flags);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
{
        i915_ppgtt_clear_range(ppgtt, obj->gtt_space->start >> PAGE_SHIFT,
            obj->base.size >> PAGE_SHIFT);
}

void i915_gem_init_ppgtt(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t pd_offset;
        struct intel_ring_buffer *ring;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        uint32_t pd_entry, first_pd_entry_in_global_pt;
        int i;

        if (!dev_priv->mm.aliasing_ppgtt)
                return;

        first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                vm_paddr_t pt_addr;

                pt_addr = VM_PAGE_TO_PHYS(ppgtt->pt_pages[i]);
                pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
                pd_entry |= GEN6_PDE_VALID;

                intel_gtt_write(first_pd_entry_in_global_pt + i, pd_entry);
        }
        intel_gtt_read_pte(first_pd_entry_in_global_pt); /* posting read */

        pd_offset = ppgtt->pd_offset;
        pd_offset /= 64; /* in cachelines, */
        pd_offset <<= 16;

        if (INTEL_INFO(dev)->gen == 6) {
                uint32_t ecochk, gab_ctl, ecobits;

                ecobits = I915_READ(GAC_ECO_BITS);
                I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

                gab_ctl = I915_READ(GAB_CTL);
                I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

                ecochk = I915_READ(GAM_ECOCHK);
                I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
                                       ECOCHK_PPGTT_CACHE64B);
                I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
        } else if (INTEL_INFO(dev)->gen >= 7) {
                I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
                /* GFX_MODE is per-ring on gen7+ */
        }
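
        /*
         * Point every ring at the page directory.  PP_DIR_BASE is loaded
         * with pd_offset as computed above (the directory offset in
         * cachelines); PP_DIR_DCLV_2G presumably marks all page-directory
         * cachelines valid, i.e. the full 2 GB PPGTT range.
         */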
        for_each_ring(ring, dev_priv, i) {
                if (INTEL_INFO(dev)->gen >= 7)
                        I915_WRITE(RING_MODE_GEN7(ring),
                                   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

                I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
                I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
        }
}
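
/*
 * On certain Ironlake systems with VT-d enabled (the do_idle_maps quirk
 * below), GTT entries must not be torn down while the GPU is active, so the
 * unbind path idles the GPU first and restores the previous interruptible
 * state afterwards.
 */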
static bool do_idling(struct drm_i915_private *dev_priv)
{
        bool ret = dev_priv->mm.interruptible;

        if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
                dev_priv->mm.interruptible = false;
                if (i915_gpu_idle(dev_priv->dev, false)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        DELAY(10);
                }
        }

        return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
        if (unlikely(dev_priv->mm.gtt->do_idle_maps))
                dev_priv->mm.interruptible = interruptible;
}

static void i915_ggtt_clear_range(struct drm_device *dev,
                                  unsigned first_entry,
                                  unsigned num_entries)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        gtt_pte_t scratch_pte;
        gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
        const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
        int i;

        if (INTEL_INFO(dev)->gen < 6) {
                intel_gtt_clear_range(first_entry, num_entries);
                return;
        }

        if (WARN(num_entries > max_entries,
                 "First entry = %d; Num entries = %d (max=%d)\n",
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;

        scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
}

void
i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv;
        struct drm_i915_gem_object *obj;

        dev_priv = dev->dev_private;

        /* First fill our portion of the GTT with scratch pages */
        intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
            (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                i915_gem_clflush_object(obj);
                i915_gem_gtt_bind_object(obj, obj->cache_level);
        }

        intel_gtt_chipset_flush();
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
                                  enum i915_cache_level level)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct sg_table *st = obj->pages;
        struct scatterlist *sg = st->sgl;
        const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
        const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
        gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
        int unused, i = 0;
        unsigned int len, m = 0;
        dma_addr_t addr;

        for_each_sg(st->sgl, sg, st->nents, unused) {
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                for (m = 0; m < len; m++) {
                        addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
                        i++;
                }
        }

        BUG_ON(i > max_entries);
        BUG_ON(i != obj->base.size / PAGE_SIZE);

        /* XXX: This serves as a posting read to make sure that the PTE has
         * actually been updated. There is some concern that even though
         * registers and PTEs are within the same BAR that they are potentially
         * of NUMA access patterns. Therefore, even with the way we assume
         * hardware should work, we must keep this posting read for paranoia.
         */
        WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));

        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
         * have finished.
         */
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level)
{
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
            AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

        intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
            obj->base.size >> PAGE_SHIFT, obj->pages, flags);

        obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible;

        interruptible = do_idling(dev_priv);

        intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
            obj->base.size >> PAGE_SHIFT);

        undo_idling(dev_priv, interruptible);
        obj->has_global_gtt_mapping = 0;
}