/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 * Copyright (c) 2011 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <machine/md_var.h>

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>

#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
static __must_check int
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                           struct i915_address_space *vm,
                           bool map_and_fenceable,
static int i915_gem_phys_pwrite(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,

static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
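
/*
 * cpu_cache_is_coherent: CPU access to the object is coherent with the GPU
 * (no explicit clflush needed) when the platform has a shared LLC or the
 * object uses a snooped (non-NONE) cache level.
 */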
static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
{
        return HAS_LLC(dev) || level != I915_CACHE_NONE;
}
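
/*
 * cpu_write_needs_clflush: CPU writes must be flushed out of the cache when
 * the object is not coherent with the GPU, or when it is pinned for display
 * (the display engine does not snoop the CPU cache).
 */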
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return true;

        return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
        if (obj->tiling_mode)
                i915_gem_release_mmap(obj);

        /* As we do not have an associated fence register, we will force
         * a tiling change if we ever need to acquire one.
         */
        obj->fence_dirty = false;
        obj->fence_reg = I915_FENCE_REG_NONE;
}
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count++;
        dev_priv->mm.object_memory += size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     size_t size)
{
        spin_lock(&dev_priv->mm.object_stat_lock);
        dev_priv->mm.object_count--;
        dev_priv->mm.object_memory -= size;
        spin_unlock(&dev_priv->mm.object_stat_lock);
}

i915_gem_wait_for_error(struct i915_gpu_error *error)
{
#define EXIT_COND (!i915_reset_in_progress(error) || \
                   i915_terminally_wedged(error))

        /*
         * Only wait 10 seconds for the gpu reset to complete to avoid hanging
         * userspace. If it takes that long something really bad is going on and
         * we should simply try to bail out and fail as gracefully as possible.
         */
        ret = wait_event_interruptible_timeout(error->reset_queue,
                DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
        } else if (ret < 0) {

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);

        ret = lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_SLEEPFAIL);

        WARN_ON(i915_verify_lists(dev));

i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
        return i915_gem_obj_bound_any(obj) && !obj->active;
}
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_init *args = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))

        if (args->gtt_start >= args->gtt_end ||
            (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))

        /* GEM with user mode setting was never supported on ilk and later. */
        if (INTEL_INFO(dev)->gen >= 5)

        mutex_lock(&dev->struct_mutex);
        dev_priv->gtt.mappable_end = args->gtt_end;
        kprintf("INITGLOBALGTT GTT_START %016jx\n", (uintmax_t)args->gtt_start);
        i915_gem_init_global_gtt(dev);
        i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
        mutex_unlock(&dev->struct_mutex);

i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
        struct drm_i915_gem_object *obj;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                pinned += i915_gem_obj_ggtt_size(obj);
        mutex_unlock(&dev->struct_mutex);

        args->aper_size = dev_priv->gtt.base.total;
        args->aper_available_size = args->aper_size - pinned;

void *i915_gem_object_alloc(struct drm_device *dev)
{
        return kmalloc(sizeof(struct drm_i915_gem_object),
                       M_DRM, M_WAITOK | M_ZERO);

void i915_gem_object_free(struct drm_i915_gem_object *obj)

i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
{
        struct drm_i915_gem_object *obj;

        size = roundup(size, PAGE_SIZE);

        /* Allocate the new object */
        obj = i915_gem_alloc_object(dev, size);

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);

i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
                               args->size, &args->handle);

/*
 * Creates a new mm object and returns a handle to it.
 */
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_create *args = data;

        return i915_gem_create(file, dev,
                               args->size, &args->handle);
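
/*
 * Copy out of a bit-17-swizzled object: within every 128-byte span the two
 * 64-byte cachelines are swapped, so the GPU-side offset is XORed with 64 for
 * each cacheline copied (e.g. data at GPU offset 0x40 is read from 0x00).
 */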
__copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
{
        int ret, cpu_offset = 0;

                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_to_user(cpu_vaddr + cpu_offset,
                                     gpu_vaddr + swizzled_gpu_offset,

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;
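
/*
 * Inverse of __copy_to_user_swizzled: copy user data into a bit-17-swizzled
 * object, applying the same per-cacheline XOR-with-64 to the GPU-side offset.
 */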
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
                          const char __user *cpu_vaddr,
{
        int ret, cpu_offset = 0;

                int cacheline_end = ALIGN(gpu_offset + 1, 64);
                int this_length = min(cacheline_end - gpu_offset, length);
                int swizzled_gpu_offset = gpu_offset ^ 64;

                ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
                                       cpu_vaddr + cpu_offset,

                cpu_offset += this_length;
                gpu_offset += this_length;
                length -= this_length;

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
shmem_pread_fast(struct vm_page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
        if (unlikely(page_do_bit17_swizzling))

        vaddr = kmap_atomic(page);
                drm_clflush_virt_range(vaddr + shmem_page_offset,
        ret = __copy_to_user_inatomic(user_data,
                                      vaddr + shmem_page_offset,
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;

shmem_clflush_swizzled_range(char *addr, unsigned long length,
{
        if (unlikely(swizzled)) {
                unsigned long start = (unsigned long) addr;
                unsigned long end = (unsigned long) addr + length;

                /* For swizzling simply ensure that we always flush both
                 * channels. Lame, but simple and it works. Swizzled
                 * pwrite/pread is far from a hotpath - current userspace
                 * doesn't use it at all. */
                start = round_down(start, 128);
                end = round_up(end, 128);

                drm_clflush_virt_range((void *)start, end - start);
                drm_clflush_virt_range(addr, length);

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
shmem_pread_slow(struct vm_page *page, int shmem_page_offset, int page_length,
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
{
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_do_bit17_swizzling);

        if (page_do_bit17_swizzling)
                ret = __copy_to_user_swizzled(user_data,
                                              vaddr, shmem_page_offset,
                ret = __copy_to_user(user_data,
                                     vaddr + shmem_page_offset,

        return ret ? -EFAULT : 0;
i915_gem_shmem_pread(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args,
                     struct drm_file *file)
{
        char __user *user_data;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int needs_clflush = 0;

        user_data = to_user_ptr(args->data_ptr);

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
                /* If we're not in the cpu read domain, set ourself into the gtt
                 * read domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will dirty the data
                 * anyway again before the next pread happens. */
                needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
                ret = i915_gem_object_wait_rendering(obj, true);

        ret = i915_gem_object_get_pages(obj);

        i915_gem_object_pin_pages(obj);

        offset = args->offset;

        for (i = 0; i < (obj->base.size >> PAGE_SHIFT); i++) {
                struct vm_page *page = obj->pages[i];

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);
                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pread_fast(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,

                mutex_unlock(&dev->struct_mutex);

                if (likely(!i915_prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
                         * data up to the first fault. Hence ignore any errors
                         * and just continue. */

                ret = shmem_pread_slow(page, shmem_page_offset, page_length,
                                       user_data, page_do_bit17_swizzling,

                mutex_lock(&dev->struct_mutex);

                mark_page_accessed(page);

                remain -= page_length;
                user_data += page_length;
                offset += page_length;

        i915_gem_object_unpin_pages(obj);

/*
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;

        ret = i915_mutex_lock_interruptible(dev);

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {

        /* Bounds check source. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {

        trace_i915_gem_object_pread(obj, args->offset, args->size);

        ret = i915_gem_shmem_pread(dev, obj, args, file);

        drm_gem_object_unreference(&obj->base);

        mutex_unlock(&dev->struct_mutex);
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

#if 0	/* XXX: buggy on core2 machines */
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
{
        void __iomem *vaddr_atomic;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        /* We can use the cpu mem copy function because this is X86. */
        vaddr = (char __force*)vaddr_atomic + page_offset;
        unwritten = __copy_from_user_inatomic_nocache(vaddr,
        io_mapping_unmap_atomic(vaddr_atomic);

i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj,
                   uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file)
{
        /*
         * Pass the unaligned physical address and size to pmap_mapdev_attr()
         * so it can properly calculate whether an extra page needs to be
         * mapped or not to cover the requested range. The function will
         * add the page offset into the returned mkva for us.
         */
        mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base +
            i915_gem_obj_ggtt_offset(obj) + offset, size, PAT_WRITE_COMBINING);
        ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva, size);
        pmap_unmapdev(mkva, size);

/*
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                         struct drm_i915_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
{
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length, ret;

        ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);

        ret = i915_gem_object_set_to_gtt_domain(obj, true);

        ret = i915_gem_object_put_fence(obj);

        user_data = to_user_ptr(args->data_ptr);

        offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = offset & ~PAGE_MASK;
                page_offset = offset_in_page(offset);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
                if (fast_user_write(dev_priv->gtt.mappable, page_base,
                                    page_offset, user_data, page_length)) {
                if (i915_gem_gtt_write(dev, obj, args->data_ptr, args->size, args->offset, file)) {

                remain -= page_length;
                user_data += page_length;
                offset += page_length;

        i915_gem_object_unpin(obj);
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
shmem_pwrite_fast(struct vm_page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        if (unlikely(page_do_bit17_swizzling))

        vaddr = kmap_atomic(page);
        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
        ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
        if (needs_clflush_after)
                drm_clflush_virt_range(vaddr + shmem_page_offset,
        kunmap_atomic(vaddr);

        return ret ? -EFAULT : 0;

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
shmem_pwrite_slow(struct vm_page *page, int shmem_page_offset, int page_length,
                  char __user *user_data,
                  bool page_do_bit17_swizzling,
                  bool needs_clflush_before,
                  bool needs_clflush_after)
{
        if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_do_bit17_swizzling);
        if (page_do_bit17_swizzling)
                ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
                ret = __copy_from_user(vaddr + shmem_page_offset,
        if (needs_clflush_after)
                shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
                                             page_do_bit17_swizzling);

        return ret ? -EFAULT : 0;
i915_gem_shmem_pwrite(struct drm_device *dev,
                      struct drm_i915_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file)
{
        char __user *user_data;
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
        int needs_clflush_after = 0;
        int needs_clflush_before = 0;

        user_data = to_user_ptr(args->data_ptr);

        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                /* If we're not in the cpu write domain, set ourself into the gtt
                 * write domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will use the data
                 * right away and we therefore have to clflush anyway. */
                needs_clflush_after = cpu_write_needs_clflush(obj);
                ret = i915_gem_object_wait_rendering(obj, false);

        /* Same trick applies to invalidate partially written cachelines read
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
                needs_clflush_before =
                        !cpu_cache_is_coherent(dev, obj->cache_level);

        ret = i915_gem_object_get_pages(obj);

        i915_gem_object_pin_pages(obj);

        offset = args->offset;

        VM_OBJECT_LOCK(obj->base.vm_obj);
        vm_object_pip_add(obj->base.vm_obj, 1);
        for (i = 0; i < (obj->base.size >> PAGE_SHIFT); i++) {
                struct vm_page *page = obj->pages[i];
                int partial_cacheline_write;

                if (i < offset >> PAGE_SHIFT)

                /* Operation in this page
                 *
                 * shmem_page_offset = offset within page in shmem file
                 * page_length = bytes to copy for this page
                 */
                shmem_page_offset = offset_in_page(offset);

                page_length = remain;
                if ((shmem_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - shmem_page_offset;

                /* If we don't overwrite a cacheline completely we need to be
                 * careful to have up-to-date data by first clflushing. Don't
                 * overcomplicate things and flush the entire patch. */
                partial_cacheline_write = needs_clflush_before &&
                        ((shmem_page_offset | page_length)
                                & (cpu_clflush_line_size - 1));

                page_do_bit17_swizzling = obj_do_bit17_swizzling &&
                        (page_to_phys(page) & (1 << 17)) != 0;

                ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);

                mutex_unlock(&dev->struct_mutex);
                ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
                                        user_data, page_do_bit17_swizzling,
                                        partial_cacheline_write,
                                        needs_clflush_after);

                mutex_lock(&dev->struct_mutex);

                set_page_dirty(page);
                mark_page_accessed(page);

                remain -= page_length;
                user_data += page_length;
                offset += page_length;

        vm_object_pip_wakeup(obj->base.vm_obj);
        VM_OBJECT_UNLOCK(obj->base.vm_obj);

        i915_gem_object_unpin_pages(obj);

                /*
                 * Fixup: Flush cpu caches in case we didn't flush the dirty
                 * cachelines in-line while writing and the object moved
                 * out of the cpu write domain while we've dropped the lock.
                 */
                if (!needs_clflush_after &&
                    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                        if (i915_gem_clflush_object(obj, obj->pin_display))
                                i915_gem_chipset_flush(dev);

        if (needs_clflush_after)
                i915_gem_chipset_flush(dev);
/*
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;

        if (likely(!i915_prefault_disable)) {
                ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),

        ret = i915_mutex_lock_interruptible(dev);

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {

        /* Bounds check destination. */
        if (args->offset > obj->base.size ||
            args->size > obj->base.size - args->offset) {

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
                ret = i915_gem_phys_pwrite(dev, obj, args, file);

        if (obj->tiling_mode == I915_TILING_NONE &&
            obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
            cpu_write_needs_clflush(obj)) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case. */

        if (ret == -EFAULT || ret == -ENOSPC)
                ret = i915_gem_shmem_pwrite(dev, obj, args, file);

        drm_gem_object_unreference(&obj->base);

        mutex_unlock(&dev->struct_mutex);
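
/*
 * Check the GPU error state before waiting: a reset in progress yields
 * -EAGAIN for interruptible callers and -EIO for non-interruptible ones,
 * and a terminally wedged GPU yields -EIO.
 */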
i915_gem_check_wedge(struct i915_gpu_error *error,
{
        if (i915_reset_in_progress(error)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these. */

                /* Recovery complete, but the reset failed ... */
                if (i915_terminally_wedged(error))

 * Compare seqno against outstanding lazy request. Emit a request if they are
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
        BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

        if (seqno == ring->outstanding_lazy_seqno)
                ret = i915_add_request(ring, NULL);
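
/*
 * Presumably installed as a timer callback while waiting on a seqno: wake the
 * task passed as the timer argument so it re-checks the wait condition even if
 * the seqno interrupt was missed.
 */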
static void fake_irq(unsigned long data)
{
        wake_up_process((struct task_struct *)data);
}
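
/*
 * missed_irq: true when this ring is flagged in gpu_error.missed_irq_rings,
 * i.e. it has been observed to miss seqno interrupts.
 */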
static bool missed_irq(struct drm_i915_private *dev_priv,
                       struct intel_ring_buffer *ring)
{
        return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
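
/*
 * can_wait_boost: atomically claim the per-file RPS wait-boost flag so each
 * client requests at most one GPU frequency boost at a time; kernel-internal
 * waiters (file_priv == NULL) are always allowed to boost.
 */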
static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
        if (file_priv == NULL)
                return true;

        return !atomic_xchg(&file_priv->rps_wait_boost, true);
}
/*
 * __wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        unsigned reset_counter,
                        bool interruptible, struct timespec *timeout)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct timespec before, now, wait_time={1,0};
        unsigned long timeout_jiffies;
        bool wait_forever = true;

        WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");

        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))

        trace_i915_gem_request_wait_begin(ring, seqno);

        if (timeout != NULL) {
                wait_time = *timeout;
                wait_forever = false;

        timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);

        if (WARN_ON(!ring->irq_get(ring)))

        /* Record current time in case interrupted by signal, or wedged */
        getrawmonotonic(&before);

        (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
         i915_reset_in_progress(&dev_priv->gpu_error) || \
         reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))

                        end = wait_event_interruptible_timeout(ring->irq_queue,
                        end = wait_event_timeout(ring->irq_queue, EXIT_COND,

                /* We need to check whether any gpu reset happened in between
                 * the caller grabbing the seqno and now ... */
                if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))

                /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly wedged. */
                ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);

        } while (end == 0 && wait_forever);

        getrawmonotonic(&now);

        ring->irq_put(ring);
        trace_i915_gem_request_wait_end(ring, seqno);

                struct timespec sleep_time = timespec_sub(now, before);
                *timeout = timespec_sub(*timeout, sleep_time);
                if (!timespec_valid(timeout)) /* i.e. negative time remains */
                        set_normalized_timespec(timeout, 0, 0);

        case -EAGAIN: /* Wedged */
        case -ERESTARTSYS: /* Signal */
        case 0: /* Timeout */
                return -ETIMEDOUT;	/* -ETIME on Linux */
        default: /* Completed */
                WARN_ON(end < 0); /* We're not aware of other errors */
/*
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible = dev_priv->mm.interruptible;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));

        ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);

        ret = i915_gem_check_olr(ring, seqno);

        return __wait_seqno(ring, seqno,
                            atomic_read(&dev_priv->gpu_error.reset_counter),
                            interruptible, NULL);

i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
                                     struct intel_ring_buffer *ring)
{
        i915_gem_retire_requests_ring(ring);

        /* Manually manage the write flush as we may have not yet
         * retired the buffer.
         *
         * Note that the last_write_seqno is always the earlier of
         * the two (read/write) seqno, so if we have successfully waited,
         * we know we have passed the last write.
         */
        obj->last_write_seqno = 0;
        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;

/*
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
{
        struct intel_ring_buffer *ring = obj->ring;

        seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;

        ret = i915_wait_seqno(ring, seqno);

        return i915_gem_object_wait_rendering__tail(obj, ring);
/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = obj->ring;
        unsigned reset_counter;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!dev_priv->mm.interruptible);

        seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;

        ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);

        ret = i915_gem_check_olr(ring, seqno);

        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
        ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
        mutex_lock(&dev->struct_mutex);

        return i915_gem_object_wait_rendering__tail(obj, ring);
/*
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file)
{
        struct drm_i915_gem_set_domain *args = data;
        struct drm_i915_gem_object *obj;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;

        /* Only handle setting domains to types used by the CPU. */
        if (write_domain & I915_GEM_GPU_DOMAINS)

        if (read_domains & I915_GEM_GPU_DOMAINS)

        /* Having something in the write domain implies it's in the read
         * domain, and only that read domain.  Enforce that in the request.
         */
        if (write_domain != 0 && read_domains != write_domain)

        ret = i915_mutex_lock_interruptible(dev);

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {

        /* Try to flush the object off the GPU without holding the lock.
         * We will repeat the flush holding the lock in the normal manner
         * to catch cases where we are gazumped.
         */
        ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);

        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
                 */
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

        drm_gem_object_unreference(&obj->base);

        mutex_unlock(&dev->struct_mutex);

/*
 * Called when user space has done writes to this buffer
 */
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;

        ret = i915_mutex_lock_interruptible(dev);

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL) {

        /* Pinned buffers may be scanout, so flush the cache */
        if (obj->pin_display)
                i915_gem_object_flush_cpu_write_domain(obj, true);

        drm_gem_object_unreference(&obj->base);

        mutex_unlock(&dev->struct_mutex);
/*
 * Maps the contents of an object, returning the address it is mapped
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;

        struct proc *p = curproc;
        vm_map_t map = &p->p_vmspace->vm_map;

        obj = drm_gem_object_lookup(dev, file, args->handle);

        if (args->size == 0)

        size = round_page(args->size);
        if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {

        /*
         * Call hint to ensure that NULL is not returned as a valid address
         * and to reduce vm_map traversals.
         */
        addr = vm_map_hint(p, 0, PROT_READ|PROT_WRITE);
        vm_object_hold(obj->vm_obj);
        vm_object_reference_locked(obj->vm_obj);
        vm_object_drop(obj->vm_obj);
        rv = vm_map_find(map, obj->vm_obj, NULL,
                         args->offset, &addr, args->size,
                         PAGE_SIZE, /* align */
                         VM_MAPTYPE_NORMAL, /* maptype */
                         VM_PROT_READ | VM_PROT_WRITE, /* prot */
                         VM_PROT_READ | VM_PROT_WRITE, /* max */
                         MAP_SHARED /* cow */);
        if (rv != KERN_SUCCESS) {
                vm_object_deallocate(obj->vm_obj);
                error = -vm_mmap_to_errno(rv);

        args->addr_ptr = (uint64_t)addr;

        drm_gem_object_unreference(obj);
/*
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 */
int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres)
{
        struct drm_i915_gem_object *obj = to_intel_bo(vm_obj->handle);
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long page_offset;
        vm_page_t m, oldm = NULL;
        bool write = !!(prot & VM_PROT_WRITE);

        intel_runtime_pm_get(dev_priv);

        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = (unsigned long)offset;

        /* Magic FreeBSD VM stuff */
        vm_object_pip_add(vm_obj, 1);

         * Remove the placeholder page inserted by vm_fault() from the
         * object before dropping the object lock. If
         * i915_gem_release_mmap() is active in parallel on this gem
         * object, then it owns the drm device sx and might find the
         * placeholder already. Then, since the page is busy,
         * i915_gem_release_mmap() sleeps waiting for the busy state
         * of the page cleared. We will be not able to acquire drm
         * device lock until i915_gem_release_mmap() is able to make a
        if (*mres != NULL) {
                vm_page_remove(oldm);

        VM_OBJECT_UNLOCK(vm_obj);

        mutex_lock(&dev->struct_mutex);

        /*
         * Since the object lock was dropped, other thread might have
         * faulted on the same GTT address and instantiated the
         * mapping for the page.  Recheck.
         */
        VM_OBJECT_LOCK(vm_obj);
        m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
                if ((m->flags & PG_BUSY) != 0) {
                        mutex_unlock(&dev->struct_mutex);
        VM_OBJECT_UNLOCK(vm_obj);
        /* End magic VM stuff */

        trace_i915_gem_object_fault(obj, page_offset, true, write);

        /* Access to snoopable pages through the GTT is incoherent. */
        if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {

        /* Now bind it into the GTT if needed */
        ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);

        ret = i915_gem_object_set_to_gtt_domain(obj, write);

        ret = i915_gem_object_get_fence(obj);

        obj->fault_mappable = true;

        VM_OBJECT_LOCK(vm_obj);
        m = vm_phys_fictitious_to_vm_page(dev->agp->base +
                                          i915_gem_obj_ggtt_offset(obj) + offset);
        KASSERT((m->flags & PG_FICTITIOUS) != 0,
            ("not fictitious %p", m));
        KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));

        if ((m->flags & PG_BUSY) != 0) {
                i915_gem_object_unpin(obj);
                mutex_unlock(&dev->struct_mutex);
        m->valid = VM_PAGE_BITS_ALL;

        /* Finally, remap it using the new GTT offset */
        vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));

        vm_page_busy_try(m, false);
        i915_gem_object_unpin(obj);
        mutex_unlock(&dev->struct_mutex);

        vm_object_pip_wakeup(vm_obj);
        return (VM_PAGER_OK);

        i915_gem_object_unpin(obj);
        mutex_unlock(&dev->struct_mutex);
        KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
                goto unlocked_vmobj;
        VM_OBJECT_LOCK(vm_obj);
        vm_object_pip_wakeup(vm_obj);
        ret = VM_PAGER_ERROR;

        intel_runtime_pm_put(dev_priv);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
        struct i915_vma *vma;

        /*
         * Only the global gtt is relevant for gtt memory mappings, so restrict
         * list traversal to objects bound into the global address space. Note
         * that the active list should be empty, but better safe than sorry.
         */
        WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
        list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
                i915_gem_release_mmap(vma->obj);
        list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
                i915_gem_release_mmap(vma->obj);
/*
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
        if (!obj->fault_mappable)

        devobj = cdev_pager_lookup(obj);
        if (devobj != NULL) {
                page_count = OFF_TO_IDX(obj->base.size);

                VM_OBJECT_LOCK(devobj);
                for (i = 0; i < page_count; i++) {
                        m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm");
                        cdev_pager_free_page(devobj, m);
                VM_OBJECT_UNLOCK(devobj);
                vm_object_deallocate(devobj);

        obj->fault_mappable = false;
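
/*
 * Fence regions on pre-gen4 hardware must be a power of two large enough to
 * cover the object (minimum 1MB on gen3, 512KB earlier); gen4+ and untiled
 * objects need no such rounding.
 */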
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
        if (INTEL_INFO(dev)->gen >= 4 ||
            tiling_mode == I915_TILING_NONE)

        /* Previous chips need a power-of-two fence region when tiling */
        if (INTEL_INFO(dev)->gen == 3)
                gtt_size = 1024*1024;
                gtt_size = 512*1024;

        while (gtt_size < size)

/*
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
                           int tiling_mode, bool fenced)
{
        /*
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
        if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
            tiling_mode == I915_TILING_NONE)

        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
        return i915_gem_get_gtt_size(dev, size, tiling_mode);

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

        if (drm_vma_node_has_offset(&obj->base.vma_node))

        dev_priv->mm.shrinker_no_lock_stealing = true;

        ret = drm_gem_create_mmap_offset(&obj->base);

        /* Badly fragmented mmap space? The only way we can recover
         * space is by destroying unwanted objects. We can't randomly release
         * mmap_offsets as userspace expects them to be persistent for the
         * lifetime of the objects. The closest we can is to release the
         * offsets on purgeable objects by truncating it and marking it purged,
         * which prevents userspace from ever using that object again.
         */
        i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
        ret = drm_gem_create_mmap_offset(&obj->base);

        i915_gem_shrink_all(dev_priv);
        ret = drm_gem_create_mmap_offset(&obj->base);

        dev_priv->mm.shrinker_no_lock_stealing = false;

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
        drm_gem_free_mmap_offset(&obj->base);
}
i915_gem_mmap_gtt(struct drm_file *file,
                  struct drm_device *dev,
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

        ret = i915_mutex_lock_interruptible(dev);

        obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
        if (&obj->base == NULL) {

        if (obj->base.size > dev_priv->gtt.mappable_end) {

        if (obj->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to mmap a purgeable buffer\n");

        ret = i915_gem_object_create_mmap_offset(obj);

        *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
            DRM_GEM_MAPPING_KEY;

        drm_gem_object_unreference(&obj->base);

        mutex_unlock(&dev->struct_mutex);

/*
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 */
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file)
{
        struct drm_i915_gem_mmap_gtt *args = data;

        return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);

/* Immediately discard the backing storage */
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
        vm_obj = obj->base.vm_obj;
        VM_OBJECT_LOCK(vm_obj);
        vm_object_page_remove(vm_obj, 0, 0, false);
        VM_OBJECT_UNLOCK(vm_obj);

        obj->madv = __I915_MADV_PURGED;

i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
        return obj->madv == I915_MADV_DONTNEED;
}
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
        int page_count = obj->base.size / PAGE_SIZE;

        BUG_ON(obj->madv == __I915_MADV_PURGED);

        ret = i915_gem_object_set_to_cpu_domain(obj, true);
                /* In the event of a disaster, abandon all caches and
                 * hope for the best.
                 */
                WARN_ON(ret != -EIO);
                i915_gem_clflush_object(obj, true);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;

        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj);

        if (obj->madv == I915_MADV_DONTNEED)

        for (i = 0; i < page_count; i++) {
                struct vm_page *page = obj->pages[i];

                        set_page_dirty(page);

                if (obj->madv == I915_MADV_WILLNEED)
                        mark_page_accessed(page);

                vm_page_busy_wait(obj->pages[i], FALSE, "i915gem");
                vm_page_unwire(obj->pages[i], 1);
                vm_page_wakeup(obj->pages[i]);

i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
        const struct drm_i915_gem_object_ops *ops = obj->ops;

        if (obj->pages == NULL)

        if (obj->pages_pin_count)

        BUG_ON(i915_gem_obj_bound_any(obj));

        /* ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
        list_del(&obj->global_list);

        ops->put_pages(obj);

        if (i915_gem_object_is_purgeable(obj))
                i915_gem_object_truncate(obj);
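
/*
 * Shrinker core: walk the unbound list and then the bound list (unbinding
 * VMAs as needed), dropping the backing pages of objects - optionally only
 * purgeable ones - until roughly 'target' pages have been released. Returns
 * the number of pages freed.
 */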
static unsigned long
__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
                  bool purgeable_only)
{
        struct drm_i915_gem_object *obj, *next;
        unsigned long count = 0;

        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.unbound_list,
                if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                    i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
                        if (count >= target)

        list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
                struct i915_vma *vma, *v;

                if (!i915_gem_object_is_purgeable(obj) && purgeable_only)

                list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
                        if (i915_vma_unbind(vma))

                if (!i915_gem_object_put_pages(obj)) {
                        count += obj->base.size >> PAGE_SHIFT;
                        if (count >= target)

static unsigned long
i915_gem_purge(struct drm_i915_private *dev_priv, long target)
{
        return __i915_gem_shrink(dev_priv, target, true);
}

static unsigned long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_object *obj, *next;

        i915_gem_evict_everything(dev_priv->dev);

        list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
                if (i915_gem_object_put_pages(obj) == 0)
                        freed += obj->base.size >> PAGE_SHIFT;
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int page_count, i, j;
        struct vm_page *page;

        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
        BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
        BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

        page_count = obj->base.size / PAGE_SIZE;
        obj->pages = kmalloc(page_count * sizeof(vm_page_t), M_DRM,

        /* Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         *
         * Fail silently without starting the shrinker
         */
        vm_obj = obj->base.vm_obj;
        VM_OBJECT_LOCK(vm_obj);
        for (i = 0; i < page_count; i++) {
                page = shmem_read_mapping_page(vm_obj, i);
                        i915_gem_purge(dev_priv, page_count);
                        page = shmem_read_mapping_page(vm_obj, i);
                        /* We've tried hard to allocate the memory by reaping
                         * our own buffer, now let the real VM do its job and
                         * go down in flames if truly OOM.
                         */
                        i915_gem_shrink_all(dev_priv);
                        page = shmem_read_mapping_page(vm_obj, i);
#ifdef CONFIG_SWIOTLB
                if (swiotlb_nr_tbl()) {
                        sg_set_page(sg, page, PAGE_SIZE, 0);
                obj->pages[i] = page;
#ifdef CONFIG_SWIOTLB
        if (!swiotlb_nr_tbl())
        VM_OBJECT_UNLOCK(vm_obj);

        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj);

        for (j = 0; j < i; j++) {
                page = obj->pages[j];
                vm_page_busy_wait(page, FALSE, "i915gem");
                vm_page_unwire(page, 0);
                vm_page_wakeup(page);
        VM_OBJECT_UNLOCK(vm_obj);

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        const struct drm_i915_gem_object_ops *ops = obj->ops;

        if (obj->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to obtain a purgeable object\n");

        BUG_ON(obj->pages_pin_count);

        ret = ops->get_pages(obj);

        list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
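
/*
 * Mark the object as active on the given ring: take a reference when it first
 * becomes active, record the read (and, for fenced access, fence) seqno,
 * move it onto the ring's active list and bump the fence register LRU.
 */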
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *ring)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 seqno = intel_ring_get_seqno(ring);

        BUG_ON(ring == NULL);
        if (obj->ring != ring && obj->last_write_seqno) {
                /* Keep the seqno relative to the current ring */
                obj->last_write_seqno = seqno;

        /* Add a reference if we're newly entering the active list. */
                drm_gem_object_reference(&obj->base);

        list_move_tail(&obj->ring_list, &ring->active_list);

        obj->last_read_seqno = seqno;

        if (obj->fenced_gpu_access) {
                obj->last_fenced_seqno = seqno;

                /* Bump MRU to take account of the delayed flush */
                if (obj->fence_reg != I915_FENCE_REG_NONE) {
                        struct drm_i915_fence_reg *reg;

                        reg = &dev_priv->fence_regs[obj->fence_reg];
                        list_move_tail(&reg->lru_list,
                                       &dev_priv->mm.fence_list);

void i915_vma_move_to_active(struct i915_vma *vma,
                             struct intel_ring_buffer *ring)
{
        list_move_tail(&vma->mm_list, &vma->vm->active_list);
        return i915_gem_object_move_to_active(vma->obj, ring);
}

i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
        struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);

        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);

        list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);

        list_del_init(&obj->ring_list);

        obj->last_read_seqno = 0;
        obj->last_write_seqno = 0;
        obj->base.write_domain = 0;

        obj->last_fenced_seqno = 0;
        obj->fenced_gpu_access = false;

        drm_gem_object_unreference(&obj->base);

        WARN_ON(i915_verify_lists(dev));
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;

        /* Carefully retire all requests without writing to the rings */
        for_each_ring(ring, dev_priv, i) {
                ret = intel_ring_idle(ring);
        i915_gem_retire_requests(dev);

        /* Finally reset hw state */
        for_each_ring(ring, dev_priv, i) {
                intel_ring_init_seqno(ring, seqno);

                for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
                        ring->sync_seqno[j] = 0;

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* HWS page needs to be set less than what we
         * will inject to ring
         */
        ret = i915_gem_init_seqno(dev, seqno - 1);

        /* Carefully set the last_seqno value so that wrap
         * detection still works
         */
        dev_priv->next_seqno = seqno;
        dev_priv->last_seqno = seqno - 1;
        if (dev_priv->last_seqno == 0)
                dev_priv->last_seqno--;

i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* reserve 0 for non-seqno */
        if (dev_priv->next_seqno == 0) {
                int ret = i915_gem_init_seqno(dev, 0);

                dev_priv->next_seqno = 1;

        *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
int __i915_add_request(struct intel_ring_buffer *ring,
                       struct drm_file *file,
                       struct drm_i915_gem_object *obj,
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
        u32 request_ring_position, request_start;

        request_start = intel_ring_get_tail(ring);
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
         * things up similar to emitting the lazy request. The difference here
         * is that the flush _must_ happen before the next request, no matter
        ret = intel_ring_flush_all_caches(ring);

        request = ring->preallocated_lazy_request;
        if (WARN_ON(request == NULL))

        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
        request_ring_position = intel_ring_get_tail(ring);

        ret = ring->add_request(ring);

        request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
        request->head = request_start;
        request->tail = request_ring_position;

        /* Whilst this request exists, batch_obj will be on the
         * active_list, and so will hold the active reference. Only when this
         * request is retired will the batch_obj be moved onto the
         * inactive_list and lose its active reference. Hence we do not need
         * to explicitly hold another reference here.
         */
        request->batch_obj = obj;

        /* Hold a reference to the current context so that we can inspect
         * it later in case a hangcheck error event fires.
         */
        request->ctx = ring->last_context;
                i915_gem_context_reference(request->ctx);

        request->emitted_jiffies = jiffies;
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
        request->file_priv = NULL;

                struct drm_i915_file_private *file_priv = file->driver_priv;

                spin_lock(&file_priv->mm.lock);
                request->file_priv = file_priv;
                list_add_tail(&request->client_list,
                              &file_priv->mm.request_list);
                spin_unlock(&file_priv->mm.lock);

        trace_i915_gem_request_add(ring, request->seqno);
        ring->outstanding_lazy_seqno = 0;
        ring->preallocated_lazy_request = NULL;

        if (!dev_priv->ums.mm_suspended) {
                i915_queue_hangcheck(ring->dev);

                        cancel_delayed_work_sync(&dev_priv->mm.idle_work);
                        queue_delayed_work(dev_priv->wq,
                                           &dev_priv->mm.retire_work,
                                           round_jiffies_up_relative(HZ));
                        intel_mark_busy(dev_priv->dev);

                *out_seqno = request->seqno;
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
        struct drm_i915_file_private *file_priv = request->file_priv;

        spin_lock(&file_priv->mm.lock);
        if (request->file_priv) {
                list_del(&request->client_list);
                request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
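
/*
 * Helpers for blaming a hang: check whether the active head address (ACTHD)
 * falls within a batch object's address range, or within a request's span of
 * the ring, taking ring wrap-around into account.
 */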
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
                                    struct i915_address_space *vm)
{
        if (acthd >= i915_gem_obj_offset(obj, vm) &&
            acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)

static bool i915_head_inside_request(const u32 acthd_unmasked,
                                     const u32 request_start,
                                     const u32 request_end)
{
        const u32 acthd = acthd_unmasked & HEAD_ADDR;

        if (request_start < request_end) {
                if (acthd >= request_start && acthd < request_end)
        } else if (request_start > request_end) {
                if (acthd >= request_start || acthd < request_end)

static struct i915_address_space *
request_to_vm(struct drm_i915_gem_request *request)
{
        struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
        struct i915_address_space *vm;

        vm = &dev_priv->gtt.base;

static bool i915_request_guilty(struct drm_i915_gem_request *request,
                                const u32 acthd, bool *inside)
{
        /* There is a possibility that unmasked head address
         * pointing inside the ring, matches the batch_obj address range.
         * However this is extremely unlikely.
         */
        if (request->batch_obj) {
                if (i915_head_inside_object(acthd, request->batch_obj,
                                            request_to_vm(request))) {

        if (i915_head_inside_request(acthd, request->head, request->tail)) {

static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
{
        const unsigned long elapsed = get_seconds() - hs->guilty_ts;

        if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
                DRM_ERROR("context hanging too fast, declaring banned!\n");
2349 static void i915_set_reset_status(struct intel_ring_buffer *ring,
2350 struct drm_i915_gem_request *request,
2353 struct i915_ctx_hang_stats *hs = NULL;
2354 bool inside, guilty;
2355 unsigned long offset = 0;
2357 /* Innocent until proven guilty */
2360 if (request->batch_obj)
2361 offset = i915_gem_obj_offset(request->batch_obj,
2362 request_to_vm(request));
2364 if (ring->hangcheck.action != HANGCHECK_WAIT &&
2365 i915_request_guilty(request, acthd, &inside)) {
2366 DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2368 inside ? "inside" : "flushing",
2370 request->ctx ? request->ctx->id : 0,
2376 /* If contexts are disabled or this is the default context, use
2377 * file_priv->reset_state
2379 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2380 hs = &request->ctx->hang_stats;
2381 else if (request->file_priv)
2382 hs = &request->file_priv->hang_stats;
2386 hs->banned = i915_context_is_banned(hs);
2388 hs->guilty_ts = get_seconds();
2390 hs->batch_pending++;
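/*
 * Tear down a request: unlink it from the ring's request list, detach it
 * from the submitting client, and drop the context reference taken when
 * the request was added.
 */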
2395 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2397 list_del(&request->list);
2398 i915_gem_request_remove_from_client(request);
2401 i915_gem_context_unreference(request->ctx);
2406 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2407 struct intel_ring_buffer *ring)
2409 u32 completed_seqno = ring->get_seqno(ring, false);
2410 u32 acthd = intel_ring_get_active_head(ring);
2411 struct drm_i915_gem_request *request;
2413 list_for_each_entry(request, &ring->request_list, list) {
2414 if (i915_seqno_passed(completed_seqno, request->seqno))
2417 i915_set_reset_status(ring, request, acthd);
2421 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2422 struct intel_ring_buffer *ring)
2424 while (!list_empty(&ring->active_list)) {
2425 struct drm_i915_gem_object *obj;
2427 obj = list_first_entry(&ring->active_list,
2428 struct drm_i915_gem_object,
2431 i915_gem_object_move_to_inactive(obj);
2435 * We must free the requests after all the corresponding objects have
2436 * been moved off the active lists, which is the same order in which the
2437 * normal retire_requests path frees them. This is important if objects hold
2438 * implicit references on things like e.g. ppgtt address spaces through
2441 while (!list_empty(&ring->request_list)) {
2442 struct drm_i915_gem_request *request;
2444 request = list_first_entry(&ring->request_list,
2445 struct drm_i915_gem_request,
2448 i915_gem_free_request(request);
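/*
 * Rewrite every fence register from the tracked software state: registers
 * that still have an object attached are reprogrammed for that object's
 * tiling, the rest are cleared. Used when fence state may have been lost,
 * e.g. across a GPU reset.
 */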
2452 void i915_gem_restore_fences(struct drm_device *dev)
2454 struct drm_i915_private *dev_priv = dev->dev_private;
2457 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2458 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2461 * Commit delayed tiling changes if we have an object still
2462 * attached to the fence, otherwise just clear the fence.
2465 i915_gem_object_update_fence(reg->obj, reg,
2466 reg->obj->tiling_mode);
2468 i915_gem_write_fence(dev, i, NULL);
2473 void i915_gem_reset(struct drm_device *dev)
2475 struct drm_i915_private *dev_priv = dev->dev_private;
2476 struct intel_ring_buffer *ring;
2480 * Before we free the objects from the requests, we need to inspect
2481 * them to find the guilty party. As the requests only borrow
2482 * their reference to the objects, the inspection must be done first.
2484 for_each_ring(ring, dev_priv, i)
2485 i915_gem_reset_ring_status(dev_priv, ring);
2487 for_each_ring(ring, dev_priv, i)
2488 i915_gem_reset_ring_cleanup(dev_priv, ring);
2490 i915_gem_cleanup_ringbuffer(dev);
2492 i915_gem_restore_fences(dev);
2496 * This function clears the request list as sequence numbers are passed.
2499 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2503 if (list_empty(&ring->request_list))
2506 WARN_ON(i915_verify_lists(ring->dev));
2508 seqno = ring->get_seqno(ring, true);
2510 while (!list_empty(&ring->request_list)) {
2511 struct drm_i915_gem_request *request;
2513 request = list_first_entry(&ring->request_list,
2514 struct drm_i915_gem_request,
2517 if (!i915_seqno_passed(seqno, request->seqno))
2520 trace_i915_gem_request_retire(ring, request->seqno);
2521 /* We know the GPU must have read the request to have
2522 * sent us the seqno + interrupt, so use the position
2523 * of the tail of the request to update the last known position
2526 ring->last_retired_head = request->tail;
2528 i915_gem_free_request(request);
2531 /* Move any buffers on the active list that are no longer referenced
2532 * by the ringbuffer to the flushing/inactive lists as appropriate.
2534 while (!list_empty(&ring->active_list)) {
2535 struct drm_i915_gem_object *obj;
2537 obj = list_first_entry(&ring->active_list,
2538 struct drm_i915_gem_object,
2541 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2544 i915_gem_object_move_to_inactive(obj);
2547 if (unlikely(ring->trace_irq_seqno &&
2548 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2549 ring->irq_put(ring);
2550 ring->trace_irq_seqno = 0;
2553 WARN_ON(i915_verify_lists(ring->dev));
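/*
 * Retire completed requests on every ring; the per-ring request lists are
 * also checked so callers can tell whether the GPU has gone idle.
 */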
2557 i915_gem_retire_requests(struct drm_device *dev)
2559 drm_i915_private_t *dev_priv = dev->dev_private;
2560 struct intel_ring_buffer *ring;
2564 for_each_ring(ring, dev_priv, i) {
2565 i915_gem_retire_requests_ring(ring);
2566 idle &= list_empty(&ring->request_list);
2573 i915_gem_retire_work_handler(struct work_struct *work)
2575 drm_i915_private_t *dev_priv;
2576 struct drm_device *dev;
2577 struct intel_ring_buffer *ring;
2581 dev_priv = container_of(work, drm_i915_private_t,
2582 mm.retire_work.work);
2583 dev = dev_priv->dev;
2585 /* Come back later if the device is busy... */
2586 if (lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_NOWAIT)) {
2587 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2588 round_jiffies_up_relative(HZ));
2592 i915_gem_retire_requests(dev);
2594 /* Send a periodic flush down the ring so we don't hold onto GEM
2595 * objects indefinitely.
2598 for_each_ring(ring, dev_priv, i) {
2599 if (ring->gpu_caches_dirty)
2600 i915_add_request(ring, NULL);
2602 idle &= list_empty(&ring->request_list);
2605 if (!dev_priv->ums.mm_suspended && !idle)
2606 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2607 round_jiffies_up_relative(HZ));
2609 intel_mark_idle(dev);
2611 mutex_unlock(&dev->struct_mutex);
2615 i915_gem_idle_work_handler(struct work_struct *work)
2617 struct drm_i915_private *dev_priv =
2618 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2620 intel_mark_idle(dev_priv->dev);
2624 * Ensures that an object will eventually get non-busy by flushing any required
2625 * write domains, emitting any outstanding lazy request and retiring any
2626 * completed requests.
2629 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2634 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2638 i915_gem_retire_requests_ring(obj->ring);
2645 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2646 * @DRM_IOCTL_ARGS: standard ioctl arguments
2648 * Returns 0 if successful, else an error is returned with the remaining time in
2649 * the timeout parameter.
2650 * -ETIME: object is still busy after timeout
2651 * -ERESTARTSYS: signal interrupted the wait
2652 * -ENOENT: object doesn't exist
2653 * Also possible, but rare:
2654 * -EAGAIN: GPU wedged
2656 * -ENODEV: Internal IRQ fail
2657 * -E?: The add request failed
2659 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2660 * non-zero timeout parameter the wait ioctl will wait for the given number of
2661 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2662 * without holding struct_mutex the object may become re-busied before this
2663 * function completes. A similar but shorter race condition exists in the busy
2667 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2669 drm_i915_private_t *dev_priv = dev->dev_private;
2670 struct drm_i915_gem_wait *args = data;
2671 struct drm_i915_gem_object *obj;
2672 struct intel_ring_buffer *ring = NULL;
2673 struct timespec timeout_stack, *timeout = NULL;
2674 unsigned reset_counter;
2678 if (args->timeout_ns >= 0) {
2679 timeout_stack = ns_to_timespec(args->timeout_ns);
2680 timeout = &timeout_stack;
2683 ret = i915_mutex_lock_interruptible(dev);
2687 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2688 if (&obj->base == NULL) {
2689 mutex_unlock(&dev->struct_mutex);
2693 /* Need to make sure the object gets inactive eventually. */
2694 ret = i915_gem_object_flush_active(obj);
2699 seqno = obj->last_read_seqno;
2706 /* Do this after OLR check to make sure we make forward progress polling
2707 * on this IOCTL with a 0 timeout (like busy ioctl)
2709 if (!args->timeout_ns) {
2714 drm_gem_object_unreference(&obj->base);
2715 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2716 mutex_unlock(&dev->struct_mutex);
2718 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2720 args->timeout_ns = timespec_to_ns(timeout);
2724 drm_gem_object_unreference(&obj->base);
2725 mutex_unlock(&dev->struct_mutex);
2730 * i915_gem_object_sync - sync an object to a ring.
2732 * @obj: object which may be in use on another ring.
2733 * @to: ring we wish to use the object on. May be NULL.
2735 * This code is meant to abstract object synchronization with the GPU.
2736 * Calling with NULL implies synchronizing the object with the CPU
2737 * rather than a particular GPU ring.
2739 * Returns 0 if successful, else propagates up the lower layer error.
2742 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2743 struct intel_ring_buffer *to)
2745 struct intel_ring_buffer *from = obj->ring;
2749 if (from == NULL || to == from)
2752 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2753 return i915_gem_object_wait_rendering(obj, false);
2755 idx = intel_ring_sync_index(from, to);
2757 seqno = obj->last_read_seqno;
2758 if (seqno <= from->sync_seqno[idx])
2761 ret = i915_gem_check_olr(obj->ring, seqno);
2765 trace_i915_gem_ring_sync_to(from, to, seqno);
2766 ret = to->sync_to(to, from, seqno);
2768 /* We use last_read_seqno because sync_to()
2769 * might have just caused seqno wrap under
2772 from->sync_seqno[idx] = obj->last_read_seqno;
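/*
 * Drop an object out of the GTT domain prior to unbinding: revoke any CPU
 * mmaps so the next user access faults, then clear the GTT bits from the
 * read/write domains (with the usual domain-change tracepoint).
 */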
2777 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2779 u32 old_write_domain, old_read_domains;
2781 /* Force a pagefault for domain tracking on next user access */
2782 i915_gem_release_mmap(obj);
2784 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2787 /* Wait for any direct GTT access to complete */
2790 old_read_domains = obj->base.read_domains;
2791 old_write_domain = obj->base.write_domain;
2793 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2794 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2796 trace_i915_gem_object_change_domain(obj,
2801 int i915_vma_unbind(struct i915_vma *vma)
2803 struct drm_i915_gem_object *obj = vma->obj;
2804 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2807 /* For now we only ever use 1 vma per object */
2809 WARN_ON(!list_is_singular(&obj->vma_list));
2812 if (list_empty(&vma->vma_link))
2815 if (!drm_mm_node_allocated(&vma->node)) {
2816 i915_gem_vma_destroy(vma);
2824 BUG_ON(obj->pages == NULL);
2826 ret = i915_gem_object_finish_gpu(obj);
2829 /* Continue on if we fail due to EIO; the GPU is hung, so we
2830 * should be safe, and we need to clean up or else we might
2831 * cause memory corruption through use-after-free.
2834 i915_gem_object_finish_gtt(obj);
2836 /* release the fence reg _after_ flushing */
2837 ret = i915_gem_object_put_fence(obj);
2841 trace_i915_vma_unbind(vma);
2843 if (obj->has_global_gtt_mapping)
2844 i915_gem_gtt_unbind_object(obj);
2845 if (obj->has_aliasing_ppgtt_mapping) {
2846 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2847 obj->has_aliasing_ppgtt_mapping = 0;
2849 i915_gem_gtt_finish_object(obj);
2851 list_del(&vma->mm_list);
2852 /* Avoid an unnecessary call to unbind on rebind. */
2853 if (i915_is_ggtt(vma->vm))
2854 obj->map_and_fenceable = true;
2856 drm_mm_remove_node(&vma->node);
2857 i915_gem_vma_destroy(vma);
2859 /* Since the unbound list is global, only move to that list if
2860 * no more VMAs exist. */
2861 if (list_empty(&obj->vma_list))
2862 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2864 /* And finally, now that the object is completely decoupled from this vma,
2865 * we can drop its hold on the backing storage and allow it to be
2866 * reaped by the shrinker.
2868 i915_gem_object_unpin_pages(obj);
2874 * Unbinds an object from the global GTT aperture.
2877 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2879 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2880 struct i915_address_space *ggtt = &dev_priv->gtt.base;
2882 if (!i915_gem_obj_ggtt_bound(obj))
2888 BUG_ON(obj->pages == NULL);
2890 return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2893 int i915_gpu_idle(struct drm_device *dev)
2895 drm_i915_private_t *dev_priv = dev->dev_private;
2896 struct intel_ring_buffer *ring;
2899 /* Flush everything onto the inactive list. */
2900 for_each_ring(ring, dev_priv, i) {
2901 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2905 ret = intel_ring_idle(ring);
2913 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2914 struct drm_i915_gem_object *obj)
2916 drm_i915_private_t *dev_priv = dev->dev_private;
2918 int fence_pitch_shift;
2920 if (INTEL_INFO(dev)->gen >= 6) {
2921 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2922 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2924 fence_reg = FENCE_REG_965_0;
2925 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2928 fence_reg += reg * 8;
2930 /* To w/a incoherency with non-atomic 64-bit register updates,
2931 * we split the 64-bit update into two 32-bit writes. In order
2932 * for a partial fence not to be evaluated between writes, we
2933 * precede the update with write to turn off the fence register,
2934 * and only enable the fence as the last step.
2936 * For extra levels of paranoia, we make sure each step lands
2937 * before applying the next step.
2939 I915_WRITE(fence_reg, 0);
2940 POSTING_READ(fence_reg);
2943 u32 size = i915_gem_obj_ggtt_size(obj);
2946 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
2948 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
2949 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2950 if (obj->tiling_mode == I915_TILING_Y)
2951 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2952 val |= I965_FENCE_REG_VALID;
2954 I915_WRITE(fence_reg + 4, val >> 32);
2955 POSTING_READ(fence_reg + 4);
2957 I915_WRITE(fence_reg + 0, val);
2958 POSTING_READ(fence_reg);
2960 I915_WRITE(fence_reg + 4, 0);
2961 POSTING_READ(fence_reg + 4);
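/*
 * Gen3 (915-class) fences pack the whole mapping into a single 32-bit
 * register. A rough sketch of the value assembled below, assuming the
 * tile width implied by the tiling mode (the exact widths are chosen just
 * below and not repeated here):
 *
 *   pitch_val = ffs(obj->stride / tile_width) - 1    -- log2, in tile widths
 *   val = ggtt_offset
 *       | I915_FENCE_SIZE_BITS(size)
 *       | pitch_val << I830_FENCE_PITCH_SHIFT
 *       | (Y-tiled ? 1 << I830_FENCE_TILING_Y_SHIFT : 0)
 *       | I830_FENCE_REG_VALID
 */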
2965 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2966 struct drm_i915_gem_object *obj)
2968 drm_i915_private_t *dev_priv = dev->dev_private;
2972 u32 size = i915_gem_obj_ggtt_size(obj);
2976 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
2977 (size & -size) != size ||
2978 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2979 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2980 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2982 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2987 /* Note: pitch better be a power of two tile widths */
2988 pitch_val = obj->stride / tile_width;
2989 pitch_val = ffs(pitch_val) - 1;
2991 val = i915_gem_obj_ggtt_offset(obj);
2992 if (obj->tiling_mode == I915_TILING_Y)
2993 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2994 val |= I915_FENCE_SIZE_BITS(size);
2995 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2996 val |= I830_FENCE_REG_VALID;
3001 reg = FENCE_REG_830_0 + reg * 4;
3003 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3005 I915_WRITE(reg, val);
3009 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3010 struct drm_i915_gem_object *obj)
3012 drm_i915_private_t *dev_priv = dev->dev_private;
3016 u32 size = i915_gem_obj_ggtt_size(obj);
3019 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3020 (size & -size) != size ||
3021 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3022 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3023 i915_gem_obj_ggtt_offset(obj), size);
3025 pitch_val = obj->stride / 128;
3026 pitch_val = ffs(pitch_val) - 1;
3028 val = i915_gem_obj_ggtt_offset(obj);
3029 if (obj->tiling_mode == I915_TILING_Y)
3030 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3031 val |= I830_FENCE_SIZE_BITS(size);
3032 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3033 val |= I830_FENCE_REG_VALID;
3037 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3038 POSTING_READ(FENCE_REG_830_0 + reg * 4);
3041 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3043 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3046 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3047 struct drm_i915_gem_object *obj)
3049 struct drm_i915_private *dev_priv = dev->dev_private;
3051 /* Ensure that all CPU reads are completed before installing a fence
3052 * and all writes before removing the fence.
3054 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3057 WARN(obj && (!obj->stride || !obj->tiling_mode),
3058 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3059 obj->stride, obj->tiling_mode);
3061 switch (INTEL_INFO(dev)->gen) {
3066 case 4: i965_write_fence_reg(dev, reg, obj); break;
3067 case 3: i915_write_fence_reg(dev, reg, obj); break;
3068 case 2: i830_write_fence_reg(dev, reg, obj); break;
3072 /* And similarly be paranoid that no direct access to this region
3073 * is reordered to before the fence is installed.
3075 if (i915_gem_object_needs_mb(obj))
3079 static inline int fence_number(struct drm_i915_private *dev_priv,
3080 struct drm_i915_fence_reg *fence)
3082 return fence - dev_priv->fence_regs;
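/*
 * Point a fence register at an object (or clear it): write the hardware
 * register via i915_gem_write_fence() and keep the software state in sync
 * (obj->fence_reg, the fence LRU list and the fence_dirty flag).
 */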
3085 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3086 struct drm_i915_fence_reg *fence,
3089 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3090 int reg = fence_number(dev_priv, fence);
3092 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3095 obj->fence_reg = reg;
3097 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3099 obj->fence_reg = I915_FENCE_REG_NONE;
3101 list_del_init(&fence->lru_list);
3103 obj->fence_dirty = false;
3107 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3109 if (obj->last_fenced_seqno) {
3110 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3114 obj->last_fenced_seqno = 0;
3117 obj->fenced_gpu_access = false;
3122 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3124 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3125 struct drm_i915_fence_reg *fence;
3128 ret = i915_gem_object_wait_fence(obj);
3132 if (obj->fence_reg == I915_FENCE_REG_NONE)
3135 fence = &dev_priv->fence_regs[obj->fence_reg];
3137 i915_gem_object_fence_lost(obj);
3138 i915_gem_object_update_fence(obj, fence, false);
3143 static struct drm_i915_fence_reg *
3144 i915_find_fence_reg(struct drm_device *dev)
3146 struct drm_i915_private *dev_priv = dev->dev_private;
3147 struct drm_i915_fence_reg *reg, *avail;
3150 /* First try to find a free reg */
3152 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3153 reg = &dev_priv->fence_regs[i];
3157 if (!reg->pin_count)
3164 /* None available, try to steal one or wait for a user to finish */
3165 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3173 /* Wait for completion of pending flips which consume fences */
3174 if (intel_has_pending_fb_unpin(dev))
3175 return ERR_PTR(-EAGAIN);
3177 return ERR_PTR(-EDEADLK);
3181 * i915_gem_object_get_fence - set up fencing for an object
3182 * @obj: object to map through a fence reg
3184 * When mapping objects through the GTT, userspace wants to be able to write
3185 * to them without having to worry about swizzling if the object is tiled.
3186 * This function walks the fence regs looking for a free one for @obj,
3187 * stealing one if it can't find any.
3189 * It then sets up the reg based on the object's properties: address, pitch
3190 * and tiling format.
3192 * For an untiled surface, this removes any existing fence.
3195 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3197 struct drm_device *dev = obj->base.dev;
3198 struct drm_i915_private *dev_priv = dev->dev_private;
3199 bool enable = obj->tiling_mode != I915_TILING_NONE;
3200 struct drm_i915_fence_reg *reg;
3203 /* Have we updated the tiling parameters upon the object and so
3204 * will need to serialise the write to the associated fence register?
3206 if (obj->fence_dirty) {
3207 ret = i915_gem_object_wait_fence(obj);
3212 /* Just update our place in the LRU if our fence is getting reused. */
3213 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3214 reg = &dev_priv->fence_regs[obj->fence_reg];
3215 if (!obj->fence_dirty) {
3216 list_move_tail(&reg->lru_list,
3217 &dev_priv->mm.fence_list);
3220 } else if (enable) {
3221 reg = i915_find_fence_reg(dev);
3223 return PTR_ERR(reg);
3226 struct drm_i915_gem_object *old = reg->obj;
3228 ret = i915_gem_object_wait_fence(old);
3232 i915_gem_object_fence_lost(old);
3237 i915_gem_object_update_fence(obj, reg, enable);
3242 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3243 struct drm_mm_node *gtt_space,
3244 unsigned long cache_level)
3246 struct drm_mm_node *other;
3248 /* On non-LLC machines we have to be careful when putting differing
3249 * types of snoopable memory together to avoid the prefetcher
3250 * crossing memory domains and dying.
3255 if (!drm_mm_node_allocated(gtt_space))
3258 if (list_empty(&gtt_space->node_list))
3261 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3262 if (other->allocated && !other->hole_follows && other->color != cache_level)
3265 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3266 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
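/*
 * Consistency check: walk every object on the GTT list and complain if it
 * has no space reserved, if its node colour disagrees with its cache
 * level, or if its placement is no longer valid.
 */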
3272 static void i915_gem_verify_gtt(struct drm_device *dev)
3275 struct drm_i915_private *dev_priv = dev->dev_private;
3276 struct drm_i915_gem_object *obj;
3279 list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3280 if (obj->gtt_space == NULL) {
3281 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3286 if (obj->cache_level != obj->gtt_space->color) {
3287 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3288 i915_gem_obj_ggtt_offset(obj),
3289 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3291 obj->gtt_space->color);
3296 if (!i915_gem_valid_gtt_space(dev,
3298 obj->cache_level)) {
3299 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3300 i915_gem_obj_ggtt_offset(obj),
3301 i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3313 * Finds free space in the GTT aperture and binds the object there.
3316 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3317 struct i915_address_space *vm,
3319 bool map_and_fenceable,
3322 struct drm_device *dev = obj->base.dev;
3323 drm_i915_private_t *dev_priv = dev->dev_private;
3324 u32 size, fence_size, fence_alignment, unfenced_alignment;
3326 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
3327 struct i915_vma *vma;
3330 fence_size = i915_gem_get_gtt_size(dev,
3333 fence_alignment = i915_gem_get_gtt_alignment(dev,
3335 obj->tiling_mode, true);
3336 unfenced_alignment =
3337 i915_gem_get_gtt_alignment(dev,
3339 obj->tiling_mode, false);
3342 alignment = map_and_fenceable ? fence_alignment :
3344 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3345 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3349 size = map_and_fenceable ? fence_size : obj->base.size;
3351 /* If the object is bigger than the entire aperture, reject it early
3352 * before evicting everything in a vain attempt to find space.
3354 if (obj->base.size > gtt_max) {
3355 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3357 map_and_fenceable ? "mappable" : "total",
3362 ret = i915_gem_object_get_pages(obj);
3366 i915_gem_object_pin_pages(obj);
3368 BUG_ON(!i915_is_ggtt(vm));
3370 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3376 /* For now we only ever use 1 vma per object */
3378 WARN_ON(!list_is_singular(&obj->vma_list));
3382 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3384 obj->cache_level, 0, gtt_max,
3385 DRM_MM_SEARCH_DEFAULT);
3387 ret = i915_gem_evict_something(dev, vm, size, alignment,
3396 if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3397 obj->cache_level))) {
3399 goto err_remove_node;
3402 ret = i915_gem_gtt_prepare_object(obj);
3404 goto err_remove_node;
3406 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3407 list_add_tail(&vma->mm_list, &vm->inactive_list);
3409 if (i915_is_ggtt(vm)) {
3410 bool mappable, fenceable;
3412 fenceable = (vma->node.size == fence_size &&
3413 (vma->node.start & (fence_alignment - 1)) == 0);
3415 mappable = (vma->node.start + obj->base.size <=
3416 dev_priv->gtt.mappable_end);
3418 obj->map_and_fenceable = mappable && fenceable;
3421 WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
3423 trace_i915_vma_bind(vma, map_and_fenceable);
3424 i915_gem_verify_gtt(dev);
3428 drm_mm_remove_node(&vma->node);
3430 i915_gem_vma_destroy(vma);
3432 i915_gem_object_unpin_pages(obj);
3437 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3440 /* If we don't have a page list set up, then we're not pinned
3441 * to GPU, and we can ignore the cache flush because it'll happen
3442 * again at bind time.
3444 if (obj->pages == NULL)
3448 * Stolen memory is always coherent with the GPU as it is explicitly
3449 * marked as wc by the system, or the system is cache-coherent.
3454 /* If the GPU is snooping the contents of the CPU cache,
3455 * we do not need to manually clear the CPU cache lines. However,
3456 * the caches are only snooped when the render cache is
3457 * flushed/invalidated. As we always have to emit invalidations
3458 * and flushes when moving into and out of the RENDER domain, correct
3459 * snooping behaviour occurs naturally as the result of our domain
3462 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3465 trace_i915_gem_object_clflush(obj);
3466 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
3471 /** Flushes the GTT write domain for the object if it's dirty. */
3473 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3475 uint32_t old_write_domain;
3477 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3480 /* No actual flushing is required for the GTT write domain. Writes
3481 * to it immediately go to main memory as far as we know, so there's
3482 * no chipset flush. It also doesn't land in render cache.
3484 * However, we do have to enforce the order so that all writes through
3485 * the GTT land before any writes to the device, such as updates to
3490 old_write_domain = obj->base.write_domain;
3491 obj->base.write_domain = 0;
3493 trace_i915_gem_object_change_domain(obj,
3494 obj->base.read_domains,
3498 /** Flushes the CPU write domain for the object if it's dirty. */
3500 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3503 uint32_t old_write_domain;
3505 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3508 if (i915_gem_clflush_object(obj, force))
3509 i915_gem_chipset_flush(obj->base.dev);
3511 old_write_domain = obj->base.write_domain;
3512 obj->base.write_domain = 0;
3514 trace_i915_gem_object_change_domain(obj,
3515 obj->base.read_domains,
3520 * Moves a single object to the GTT read, and possibly write domain.
3522 * This function returns when the move is complete, including waiting on
3526 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3528 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3529 uint32_t old_write_domain, old_read_domains;
3532 /* Not valid to be called on unbound objects. */
3533 if (!i915_gem_obj_bound_any(obj))
3536 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3539 ret = i915_gem_object_wait_rendering(obj, !write);
3543 i915_gem_object_flush_cpu_write_domain(obj, false);
3545 /* Serialise direct access to this object with the barriers for
3546 * coherent writes from the GPU, by effectively invalidating the
3547 * GTT domain upon first access.
3549 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3552 old_write_domain = obj->base.write_domain;
3553 old_read_domains = obj->base.read_domains;
3555 /* It should now be out of any other write domains, and we can update
3556 * the domain values for our changes.
3558 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3559 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3561 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3562 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3566 trace_i915_gem_object_change_domain(obj,
3570 /* And bump the LRU for this access */
3571 if (i915_gem_object_is_inactive(obj)) {
3572 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3574 list_move_tail(&vma->mm_list,
3575 &dev_priv->gtt.base.inactive_list);
3582 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3583 enum i915_cache_level cache_level)
3585 struct drm_device *dev = obj->base.dev;
3586 drm_i915_private_t *dev_priv = dev->dev_private;
3587 struct i915_vma *vma;
3590 if (obj->cache_level == cache_level)
3593 if (obj->pin_count) {
3594 DRM_DEBUG("can not change the cache level of pinned objects\n");
3598 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3599 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3600 ret = i915_vma_unbind(vma);
3608 if (i915_gem_obj_bound_any(obj)) {
3609 ret = i915_gem_object_finish_gpu(obj);
3613 i915_gem_object_finish_gtt(obj);
3615 /* Before SandyBridge, you could not use tiling or fence
3616 * registers with snooped memory, so relinquish any fences
3617 * currently pointing to our region in the aperture.
3619 if (INTEL_INFO(dev)->gen < 6) {
3620 ret = i915_gem_object_put_fence(obj);
3625 if (obj->has_global_gtt_mapping)
3626 i915_gem_gtt_bind_object(obj, cache_level);
3627 if (obj->has_aliasing_ppgtt_mapping)
3628 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3632 list_for_each_entry(vma, &obj->vma_list, vma_link)
3633 vma->node.color = cache_level;
3634 obj->cache_level = cache_level;
3636 if (cpu_write_needs_clflush(obj)) {
3637 u32 old_read_domains, old_write_domain;
3639 /* If we're coming from LLC cached, then we haven't
3640 * actually been tracking whether the data is in the
3641 * CPU cache or not, since we only allow one bit set
3642 * in obj->write_domain and have been skipping the clflushes.
3643 * Just set it to the CPU cache for now.
3645 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3647 old_read_domains = obj->base.read_domains;
3648 old_write_domain = obj->base.write_domain;
3650 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3651 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3653 trace_i915_gem_object_change_domain(obj,
3658 i915_gem_verify_gtt(dev);
3662 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3663 struct drm_file *file)
3665 struct drm_i915_gem_caching *args = data;
3666 struct drm_i915_gem_object *obj;
3669 ret = i915_mutex_lock_interruptible(dev);
3673 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3674 if (&obj->base == NULL) {
3679 switch (obj->cache_level) {
3680 case I915_CACHE_LLC:
3681 case I915_CACHE_L3_LLC:
3682 args->caching = I915_CACHING_CACHED;
3686 args->caching = I915_CACHING_DISPLAY;
3690 args->caching = I915_CACHING_NONE;
3694 drm_gem_object_unreference(&obj->base);
3696 mutex_unlock(&dev->struct_mutex);
3700 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3701 struct drm_file *file)
3703 struct drm_i915_gem_caching *args = data;
3704 struct drm_i915_gem_object *obj;
3705 enum i915_cache_level level;
3708 switch (args->caching) {
3709 case I915_CACHING_NONE:
3710 level = I915_CACHE_NONE;
3712 case I915_CACHING_CACHED:
3713 level = I915_CACHE_LLC;
3715 case I915_CACHING_DISPLAY:
3716 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3722 ret = i915_mutex_lock_interruptible(dev);
3726 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3727 if (&obj->base == NULL) {
3732 ret = i915_gem_object_set_cache_level(obj, level);
3734 drm_gem_object_unreference(&obj->base);
3736 mutex_unlock(&dev->struct_mutex);
3740 static bool is_pin_display(struct drm_i915_gem_object *obj)
3742 /* There are 3 sources that pin objects:
3743 * 1. The display engine (scanouts, sprites, cursors);
3744 * 2. Reservations for execbuffer;
3747 * We can ignore reservations as we hold the struct_mutex and
3748 * are only called outside of the reservation path. The user
3749 * can only increment pin_count once, and so if after
3750 * subtracting the potential reference by the user, any pin_count
3751 * remains, it must be due to another use by the display engine.
3753 return obj->pin_count - !!obj->user_pin_count;
3757 * Prepare buffer for display plane (scanout, cursors, etc).
3758 * Can be called from an uninterruptible phase (modesetting) and allows
3759 * any flushes to be pipelined (for pageflips).
3762 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3764 struct intel_ring_buffer *pipelined)
3766 u32 old_read_domains, old_write_domain;
3769 if (pipelined != obj->ring) {
3770 ret = i915_gem_object_sync(obj, pipelined);
3775 /* Mark the pin_display early so that we account for the
3776 * display coherency whilst setting up the cache domains.
3778 obj->pin_display = true;
3780 /* The display engine is not coherent with the LLC cache on gen6. As
3781 * a result, we make sure that the pinning that is about to occur is
3782 * done with uncached PTEs. This is the lowest common denominator for all
3785 * However for gen6+, we could do better by using the GFDT bit instead
3786 * of uncaching, which would allow us to flush all the LLC-cached data
3787 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3789 ret = i915_gem_object_set_cache_level(obj,
3790 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3792 goto err_unpin_display;
3794 /* As the user may map the buffer once pinned in the display plane
3795 * (e.g. libkms for the bootup splash), we have to ensure that we
3796 * always use map_and_fenceable for all scanout buffers.
3798 ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
3800 goto err_unpin_display;
3802 i915_gem_object_flush_cpu_write_domain(obj, true);
3804 old_write_domain = obj->base.write_domain;
3805 old_read_domains = obj->base.read_domains;
3807 /* It should now be out of any other write domains, and we can update
3808 * the domain values for our changes.
3810 obj->base.write_domain = 0;
3811 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3813 trace_i915_gem_object_change_domain(obj,
3820 obj->pin_display = is_pin_display(obj);
3825 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3827 i915_gem_object_unpin(obj);
3828 obj->pin_display = is_pin_display(obj);
3832 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3836 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3839 ret = i915_gem_object_wait_rendering(obj, false);
3843 /* Ensure that we invalidate the GPU's caches and TLBs. */
3844 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3849 * Moves a single object to the CPU read, and possibly write domain.
3851 * This function returns when the move is complete, including waiting on
3855 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3857 uint32_t old_write_domain, old_read_domains;
3860 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3863 ret = i915_gem_object_wait_rendering(obj, !write);
3867 i915_gem_object_flush_gtt_write_domain(obj);
3869 old_write_domain = obj->base.write_domain;
3870 old_read_domains = obj->base.read_domains;
3872 /* Flush the CPU cache if it's still invalid. */
3873 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3874 i915_gem_clflush_object(obj, false);
3876 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3879 /* It should now be out of any other write domains, and we can update
3880 * the domain values for our changes.
3882 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3884 /* If we're writing through the CPU, then the GPU read domains will
3885 * need to be invalidated at next use.
3888 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3889 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3892 trace_i915_gem_object_change_domain(obj,
3899 /* Throttle our rendering by waiting until the ring has completed our requests
3900 * emitted over 20 msec ago.
3902 * Note that if we were to use the current jiffies each time around the loop,
3903 * we wouldn't escape the function with any frames outstanding if the time to
3904 * render a frame was over 20ms.
3906 * This should get us reasonable parallelism between CPU and GPU but also
3907 * relatively low latency when blocking on a particular request to finish.
3910 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3912 struct drm_i915_private *dev_priv = dev->dev_private;
3913 struct drm_i915_file_private *file_priv = file->driver_priv;
3914 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3915 struct drm_i915_gem_request *request;
3916 struct intel_ring_buffer *ring = NULL;
3917 unsigned reset_counter;
3921 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3925 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3929 spin_lock(&file_priv->mm.lock);
3930 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3931 if (time_after_eq(request->emitted_jiffies, recent_enough))
3934 ring = request->ring;
3935 seqno = request->seqno;
3937 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3938 spin_unlock(&file_priv->mm.lock);
3943 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3945 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3951 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3952 struct i915_address_space *vm,
3954 bool map_and_fenceable,
3957 struct i915_vma *vma;
3960 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3963 WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3965 vma = i915_gem_obj_to_vma(obj, vm);
3969 vma->node.start & (alignment - 1)) ||
3970 (map_and_fenceable && !obj->map_and_fenceable)) {
3971 WARN(obj->pin_count,
3972 "bo is already pinned with incorrect alignment:"
3973 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3974 " obj->map_and_fenceable=%d\n",
3975 i915_gem_obj_offset(obj, vm), alignment,
3977 obj->map_and_fenceable);
3978 ret = i915_vma_unbind(vma);
3984 if (!i915_gem_obj_bound(obj, vm)) {
3985 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3987 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3993 if (!dev_priv->mm.aliasing_ppgtt)
3994 i915_gem_gtt_bind_object(obj, obj->cache_level);
3997 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3998 i915_gem_gtt_bind_object(obj, obj->cache_level);
4001 obj->pin_mappable |= map_and_fenceable;
4007 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
4009 BUG_ON(obj->pin_count == 0);
4010 BUG_ON(!i915_gem_obj_bound_any(obj));
4012 if (--obj->pin_count == 0)
4013 obj->pin_mappable = false;
4017 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4018 struct drm_file *file)
4020 struct drm_i915_gem_pin *args = data;
4021 struct drm_i915_gem_object *obj;
4024 ret = i915_mutex_lock_interruptible(dev);
4028 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4029 if (&obj->base == NULL) {
4034 if (obj->madv != I915_MADV_WILLNEED) {
4035 DRM_ERROR("Attempting to pin a purgeable buffer\n");
4040 if (obj->pin_filp != NULL && obj->pin_filp != file) {
4041 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4047 if (obj->user_pin_count == ULONG_MAX) {
4052 if (obj->user_pin_count == 0) {
4053 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
4058 obj->user_pin_count++;
4059 obj->pin_filp = file;
4061 args->offset = i915_gem_obj_ggtt_offset(obj);
4063 drm_gem_object_unreference(&obj->base);
4065 mutex_unlock(&dev->struct_mutex);
4070 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4071 struct drm_file *file)
4073 struct drm_i915_gem_pin *args = data;
4074 struct drm_i915_gem_object *obj;
4077 ret = i915_mutex_lock_interruptible(dev);
4081 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4082 if (&obj->base == NULL) {
4087 if (obj->pin_filp != file) {
4088 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4093 obj->user_pin_count--;
4094 if (obj->user_pin_count == 0) {
4095 obj->pin_filp = NULL;
4096 i915_gem_object_unpin(obj);
4100 drm_gem_object_unreference(&obj->base);
4102 mutex_unlock(&dev->struct_mutex);
4107 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4108 struct drm_file *file)
4110 struct drm_i915_gem_busy *args = data;
4111 struct drm_i915_gem_object *obj;
4114 ret = i915_mutex_lock_interruptible(dev);
4118 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4119 if (&obj->base == NULL) {
4124 /* Count all active objects as busy, even if they are currently not used
4125 * by the gpu. Users of this interface expect objects to eventually
4126 * become non-busy without any further actions, therefore emit any
4127 * necessary flushes here.
4129 ret = i915_gem_object_flush_active(obj);
4131 args->busy = obj->active;
4133 args->busy |= intel_ring_flag(obj->ring) << 16;
4136 drm_gem_object_unreference(&obj->base);
4138 mutex_unlock(&dev->struct_mutex);
4143 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4144 struct drm_file *file_priv)
4146 return i915_gem_ring_throttle(dev, file_priv);
4150 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4151 struct drm_file *file_priv)
4153 struct drm_i915_gem_madvise *args = data;
4154 struct drm_i915_gem_object *obj;
4157 switch (args->madv) {
4158 case I915_MADV_DONTNEED:
4159 case I915_MADV_WILLNEED:
4165 ret = i915_mutex_lock_interruptible(dev);
4169 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4170 if (&obj->base == NULL) {
4175 if (obj->pin_count) {
4180 if (obj->madv != __I915_MADV_PURGED)
4181 obj->madv = args->madv;
4183 /* if the object is no longer attached, discard its backing storage */
4184 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4185 i915_gem_object_truncate(obj);
4187 args->retained = obj->madv != __I915_MADV_PURGED;
4190 drm_gem_object_unreference(&obj->base);
4192 mutex_unlock(&dev->struct_mutex);
4196 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4197 const struct drm_i915_gem_object_ops *ops)
4199 INIT_LIST_HEAD(&obj->global_list);
4200 INIT_LIST_HEAD(&obj->ring_list);
4201 INIT_LIST_HEAD(&obj->obj_exec_link);
4202 INIT_LIST_HEAD(&obj->vma_list);
4206 obj->fence_reg = I915_FENCE_REG_NONE;
4207 obj->madv = I915_MADV_WILLNEED;
4208 /* Avoid an unnecessary call to unbind on the first bind. */
4209 obj->map_and_fenceable = true;
4211 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4214 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4215 .get_pages = i915_gem_object_get_pages_gtt,
4216 .put_pages = i915_gem_object_put_pages_gtt,
4219 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4222 struct drm_i915_gem_object *obj;
4224 struct address_space *mapping;
4228 obj = i915_gem_object_alloc(dev);
4232 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4233 i915_gem_object_free(obj);
4238 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4239 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4240 /* 965gm cannot relocate objects above 4GiB. */
4241 mask &= ~__GFP_HIGHMEM;
4242 mask |= __GFP_DMA32;
4245 mapping = file_inode(obj->base.filp)->i_mapping;
4246 mapping_set_gfp_mask(mapping, mask);
4249 i915_gem_object_init(obj, &i915_gem_object_ops);
4251 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4252 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4255 /* On some devices, we can have the GPU use the LLC (the CPU
4256 * cache) for about a 10% performance improvement
4257 * compared to uncached. Graphics requests other than
4258 * display scanout are coherent with the CPU in
4259 * accessing this cache. This means in this mode we
4260 * don't need to clflush on the CPU side, and on the
4261 * GPU side we only need to flush internal caches to
4262 * get data visible to the CPU.
4264 * However, we maintain the display planes as UC, and so
4265 * need to rebind when first used as such.
4267 obj->cache_level = I915_CACHE_LLC;
4269 obj->cache_level = I915_CACHE_NONE;
4271 trace_i915_gem_object_create(obj);
4276 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4278 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4279 struct drm_device *dev = obj->base.dev;
4280 drm_i915_private_t *dev_priv = dev->dev_private;
4281 struct i915_vma *vma, *next;
4283 intel_runtime_pm_get(dev_priv);
4285 trace_i915_gem_object_destroy(obj);
4288 i915_gem_detach_phys_object(dev, obj);
4291 /* NB: 0 or 1 elements */
4293 WARN_ON(!list_empty(&obj->vma_list) &&
4294 !list_is_singular(&obj->vma_list));
4296 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4297 int ret = i915_vma_unbind(vma);
4298 if (WARN_ON(ret == -ERESTARTSYS)) {
4299 bool was_interruptible;
4301 was_interruptible = dev_priv->mm.interruptible;
4302 dev_priv->mm.interruptible = false;
4304 WARN_ON(i915_vma_unbind(vma));
4306 dev_priv->mm.interruptible = was_interruptible;
4310 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4311 * before progressing. */
4313 i915_gem_object_unpin_pages(obj);
4315 if (WARN_ON(obj->pages_pin_count))
4316 obj->pages_pin_count = 0;
4317 i915_gem_object_put_pages(obj);
4318 i915_gem_object_free_mmap_offset(obj);
4323 if (obj->base.import_attach)
4324 drm_prime_gem_destroy(&obj->base, NULL);
4327 drm_gem_object_release(&obj->base);
4328 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4331 i915_gem_object_free(obj);
4333 intel_runtime_pm_put(dev_priv);
4336 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4337 struct i915_address_space *vm)
4339 struct i915_vma *vma;
4340 list_for_each_entry(vma, &obj->vma_list, vma_link)
4347 static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
4348 struct i915_address_space *vm)
4350 struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4352 return ERR_PTR(-ENOMEM);
4354 INIT_LIST_HEAD(&vma->vma_link);
4355 INIT_LIST_HEAD(&vma->mm_list);
4356 INIT_LIST_HEAD(&vma->exec_list);
4360 /* Keep GGTT vmas first to make debugging easier */
4361 if (i915_is_ggtt(vm))
4362 list_add(&vma->vma_link, &obj->vma_list);
4364 list_add_tail(&vma->vma_link, &obj->vma_list);
4370 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4371 struct i915_address_space *vm)
4373 struct i915_vma *vma;
4375 vma = i915_gem_obj_to_vma(obj, vm);
4377 vma = __i915_gem_vma_create(obj, vm);
4382 void i915_gem_vma_destroy(struct i915_vma *vma)
4384 WARN_ON(vma->node.allocated);
4386 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4387 if (!list_empty(&vma->exec_list))
4390 list_del(&vma->vma_link);
4396 i915_gem_suspend(struct drm_device *dev)
4398 drm_i915_private_t *dev_priv = dev->dev_private;
4401 mutex_lock(&dev->struct_mutex);
4402 if (dev_priv->ums.mm_suspended)
4405 ret = i915_gpu_idle(dev);
4409 i915_gem_retire_requests(dev);
4411 /* Under UMS, be paranoid and evict. */
4412 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4413 i915_gem_evict_everything(dev);
4415 i915_kernel_lost_context(dev);
4416 i915_gem_cleanup_ringbuffer(dev);
4418 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4419 * We need to replace this with a semaphore, or something.
4420 * And not confound ums.mm_suspended!
4422 dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4424 mutex_unlock(&dev->struct_mutex);
4426 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4427 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4428 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
4433 mutex_unlock(&dev->struct_mutex);
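/*
 * Replay the saved L3 remapping information for one slice by emitting a
 * series of MI_LOAD_REGISTER_IMM writes on the given ring, covering
 * GEN7_L3LOG_SIZE bytes of remap data starting at
 * GEN7_L3LOG_BASE + slice * 0x200.
 */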
4437 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4439 struct drm_device *dev = ring->dev;
4440 drm_i915_private_t *dev_priv = dev->dev_private;
4441 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4442 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4445 if (!HAS_L3_DPF(dev) || !remap_info)
4448 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4453 * Note: We do not worry about the concurrent register cacheline hang
4454 * here because no other code should access these registers other than
4455 * at initialization time.
4457 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4458 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4459 intel_ring_emit(ring, reg_base + i);
4460 intel_ring_emit(ring, remap_info[i/4]);
4463 intel_ring_advance(ring);
4468 void i915_gem_init_swizzling(struct drm_device *dev)
4470 drm_i915_private_t *dev_priv = dev->dev_private;
4472 if (INTEL_INFO(dev)->gen < 5 ||
4473 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4476 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4477 DISP_TILE_SURFACE_SWIZZLING);
4482 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4484 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4485 else if (IS_GEN7(dev))
4486 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4487 else if (IS_GEN8(dev))
4488 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4494 intel_enable_blt(struct drm_device *dev)
4501 /* The blitter was dysfunctional on early prototypes */
4502 revision = pci_read_config(dev->dev, PCIR_REVID, 1);
4503 if (IS_GEN6(dev) && revision < 8) {
4504 DRM_INFO("BLT not supported on this pre-production hardware;"
4505 " graphics performance will be degraded.\n");
4512 static int i915_gem_init_rings(struct drm_device *dev)
4514 struct drm_i915_private *dev_priv = dev->dev_private;
4517 ret = intel_init_render_ring_buffer(dev);
4522 ret = intel_init_bsd_ring_buffer(dev);
4524 goto cleanup_render_ring;
4527 if (intel_enable_blt(dev)) {
4528 ret = intel_init_blt_ring_buffer(dev);
4530 goto cleanup_bsd_ring;
4533 if (HAS_VEBOX(dev)) {
4534 ret = intel_init_vebox_ring_buffer(dev);
4536 goto cleanup_blt_ring;
4540 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4542 goto cleanup_vebox_ring;
4547 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4549 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4551 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4552 cleanup_render_ring:
4553 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
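/*
 * Bring the GEM side of the hardware up: program swizzling, initialise the
 * rings, replay the L3 remap for each slice, initialise contexts and, if
 * an aliasing PPGTT exists, enable it.
 */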
4559 i915_gem_init_hw(struct drm_device *dev)
4561 drm_i915_private_t *dev_priv = dev->dev_private;
4565 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4569 if (dev_priv->ellc_size)
4570 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4572 if (IS_HASWELL(dev))
4573 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4574 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4576 if (HAS_PCH_NOP(dev)) {
4577 u32 temp = I915_READ(GEN7_MSG_CTL);
4578 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4579 I915_WRITE(GEN7_MSG_CTL, temp);
4582 i915_gem_init_swizzling(dev);
4584 ret = i915_gem_init_rings(dev);
4588 for (i = 0; i < NUM_L3_SLICES(dev); i++)
4589 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4592 * XXX: There was some w/a described somewhere suggesting loading
4593 * contexts before PPGTT.
4595 ret = i915_gem_context_init(dev);
4597 i915_gem_cleanup_ringbuffer(dev);
4598 DRM_ERROR("Context initialization failed %d\n", ret);
4602 if (dev_priv->mm.aliasing_ppgtt) {
4603 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4605 i915_gem_cleanup_aliasing_ppgtt(dev);
4606 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4613 int i915_gem_init(struct drm_device *dev)
4615 struct drm_i915_private *dev_priv = dev->dev_private;
4618 mutex_lock(&dev->struct_mutex);
4620 if (IS_VALLEYVIEW(dev)) {
4621 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4622 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4623 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4624 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4627 i915_gem_init_global_gtt(dev);
4629 ret = i915_gem_init_hw(dev);
4630 mutex_unlock(&dev->struct_mutex);
4632 i915_gem_cleanup_aliasing_ppgtt(dev);
4636 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4637 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4638 dev_priv->dri1.allow_batchbuffer = 1;
4643 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4645 drm_i915_private_t *dev_priv = dev->dev_private;
4646 struct intel_ring_buffer *ring;
4649 for_each_ring(ring, dev_priv, i)
4650 intel_cleanup_ring_buffer(ring);
4654 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4655 struct drm_file *file_priv)
4657 struct drm_i915_private *dev_priv = dev->dev_private;
4660 if (drm_core_check_feature(dev, DRIVER_MODESET))
4663 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4664 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4665 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4668 mutex_lock(&dev->struct_mutex);
4669 dev_priv->ums.mm_suspended = 0;
4671 ret = i915_gem_init_hw(dev);
4673 mutex_unlock(&dev->struct_mutex);
4677 BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4678 mutex_unlock(&dev->struct_mutex);
4680 ret = drm_irq_install(dev);
4682 goto cleanup_ringbuffer;
4687 mutex_lock(&dev->struct_mutex);
4688 i915_gem_cleanup_ringbuffer(dev);
4689 dev_priv->ums.mm_suspended = 1;
4690 mutex_unlock(&dev->struct_mutex);
4696 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4697 struct drm_file *file_priv)
4699 if (drm_core_check_feature(dev, DRIVER_MODESET))
4702 drm_irq_uninstall(dev);
4704 return i915_gem_suspend(dev);
4708 i915_gem_lastclose(struct drm_device *dev)
4712 if (drm_core_check_feature(dev, DRIVER_MODESET))
4715 ret = i915_gem_suspend(dev);
4717 DRM_ERROR("failed to idle hardware: %d\n", ret);
4721 init_ring_lists(struct intel_ring_buffer *ring)
4723 INIT_LIST_HEAD(&ring->active_list);
4724 INIT_LIST_HEAD(&ring->request_list);
4727 static void i915_init_vm(struct drm_i915_private *dev_priv,
4728 struct i915_address_space *vm)
4730 vm->dev = dev_priv->dev;
4731 INIT_LIST_HEAD(&vm->active_list);
4732 INIT_LIST_HEAD(&vm->inactive_list);
4733 INIT_LIST_HEAD(&vm->global_link);
4734 list_add(&vm->global_link, &dev_priv->vm_list);
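/*
 * One-time GEM initialisation at driver load: set up the VM, object and
 * fence bookkeeping lists, the retire/idle work handlers, the per-platform
 * fence register count and the memory shrinker hook.
 */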
4738 i915_gem_load(struct drm_device *dev)
4740 drm_i915_private_t *dev_priv = dev->dev_private;
4743 INIT_LIST_HEAD(&dev_priv->vm_list);
4744 i915_init_vm(dev_priv, &dev_priv->gtt.base);
4746 INIT_LIST_HEAD(&dev_priv->context_list);
4747 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4748 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4749 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4750 for (i = 0; i < I915_NUM_RINGS; i++)
4751 init_ring_lists(&dev_priv->ring[i]);
4752 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4753 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4754 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4755 i915_gem_retire_work_handler);
4756 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4757 i915_gem_idle_work_handler);
4758 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4760 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4762 I915_WRITE(MI_ARB_STATE,
4763 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4766 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4768 /* Old X drivers will take 0-2 for front, back, depth buffers */
4769 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4770 dev_priv->fence_reg_start = 3;
4772 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4773 dev_priv->num_fence_regs = 32;
4774 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4775 dev_priv->num_fence_regs = 16;
4777 dev_priv->num_fence_regs = 8;
4779 /* Initialize fence registers to zero */
4780 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4781 i915_gem_restore_fences(dev);
4783 i915_gem_detect_bit_6_swizzle(dev);
4784 init_waitqueue_head(&dev_priv->pending_flip_queue);
4786 dev_priv->mm.interruptible = true;
4789 dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4790 dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
4791 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4792 register_shrinker(&dev_priv->mm.inactive_shrinker);
4793 /* Old FreeBSD code */
4794 dev_priv->mm.inactive_shrinker = EVENTHANDLER_REGISTER(vm_lowmem,
4795 i915_gem_inactive_shrink, dev, EVENTHANDLER_PRI_ANY);
4800 * Create a physically contiguous memory object for this object
4801 * e.g. for cursor + overlay regs
4803 static int i915_gem_init_phys_object(struct drm_device *dev,
4804 int id, int size, int align)
4806 drm_i915_private_t *dev_priv = dev->dev_private;
4807 struct drm_i915_gem_phys_object *phys_obj;
4810 if (dev_priv->mm.phys_objs[id - 1] || !size)
4813 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4819 phys_obj->handle = drm_pci_alloc(dev, size, align);
4820 if (!phys_obj->handle) {
4825 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4827 pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
4828 size / PAGE_SIZE, PAT_WRITE_COMBINING);
4830 dev_priv->mm.phys_objs[id - 1] = phys_obj;
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
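
/*
 * Undo a phys-object binding: copy the contents of the contiguous buffer
 * back into the object's shmem/vm_object pages, clflush and chipset-flush
 * so nothing stale lingers, then clear the obj <-> phys_obj links.
 */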
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	struct vm_object *mapping = obj->base.vm_obj;
	char *vaddr;
	int i;
	int page_count;

	if (!obj->phys_obj)
		return;
	vaddr = obj->phys_obj->handle->vaddr;

	page_count = obj->base.size / PAGE_SIZE;
	for (i = 0; i < page_count; i++) {
		struct vm_page *page = shmem_read_mapping_page(mapping, i);
		if (!IS_ERR(page)) {
			char *dst = kmap_atomic(page);
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
			kunmap_atomic(dst);

			drm_clflush_pages(&page, 1);

			set_page_dirty(page);
			mark_page_accessed(page);
#if 0
			page_cache_release(page);
#endif
			vm_page_busy_wait(page, FALSE, "i915gem");
			vm_page_unwire(page, 0);
			vm_page_wakeup(page);
		}
	}
	i915_gem_chipset_flush(dev);

	obj->phys_obj->cur_obj = NULL;
	obj->phys_obj = NULL;
}
static int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct vm_object *mapping = obj->base.vm_obj;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct vm_page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = (char*)obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
#if 0
		page_cache_release(page);
#endif
		vm_page_busy_wait(page, FALSE, "i915gem");
		vm_page_unwire(page, 0);
		vm_page_wakeup(page);
	}

	return 0;
}
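
/*
 * pwrite fast path for phys objects: first try a non-caching copy with
 * struct_mutex held; if that faults on the user buffer, drop the lock and
 * retry with a plain copy_from_user.  As the in-function comment notes,
 * the phys backing store is fixed for the object's lifetime, so dropping
 * the lock around the slow copy is safe.
 */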
static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
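
/*
 * VM pager hooks used for GEM mmap on the BSD side: the constructor only
 * picks a page color, and the destructor tears down the fake mmap offset
 * and any GTT mapping before dropping the object reference, all under
 * struct_mutex.
 */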
static int
i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	*color = 0; /* XXXKIB */
	return (0);
}

static void
i915_gem_pager_dtor(void *handle)
{
	struct drm_gem_object *obj;
	struct drm_device *dev;

	obj = handle;
	dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_gem_free_mmap_offset(obj);
	i915_gem_release_mmap(to_intel_bo(obj));
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;

	spin_init(&file_priv->mm.lock, "i915_priv");
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	idr_init(&file_priv->context_idr);

	return 0;
}
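
/*
 * The shrinker may run while this task already holds struct_mutex (direct
 * reclaim from inside the driver).  mutex_is_locked_by() lets the
 * count/scan callbacks recognise that case and carry on without re-taking
 * the lock, rather than deadlocking or giving up.
 */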
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	bool unlock = true;
	unsigned long count;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return 0;

		unlock = false;
	}

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->active)
			continue;

		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}
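
/*
 * With per-process address spaces an object can have one VMA for each VM
 * it is bound into.  The helpers below walk obj->vma_list to answer
 * offset/size/bound queries for a given VM; lookups against the aliasing
 * PPGTT are redirected to the global GTT, which it mirrors.
 */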
/* All the new VM stuff */
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));
	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}

	return -1;
}
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == vm)
			return vma->node.size;

	return 0;
}
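
/*
 * Shrinker scan: reclaim in increasing order of cost.  Purge objects
 * userspace has already marked as purgeable first, then unbind and drop
 * other unpinned objects, and only shrink everything if the target still
 * has not been reached.
 */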
static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker,
			     struct drm_i915_private,
			     mm.inactive_shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock = true;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return SHRINK_STOP;

		if (dev_priv->mm.shrinker_no_lock_stealing)
			return SHRINK_STOP;

		unlock = false;
	}

	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
	if (freed < sc->nr_to_scan)
		freed += __i915_gem_shrink(dev_priv,
					   sc->nr_to_scan - freed,
					   false);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink_all(dev_priv);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}
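
/*
 * The GGTT VMA is expected to sit at the head of obj->vma_list;
 * i915_gem_obj_to_ggtt() relies on that ordering and WARNs if the first
 * entry belongs to some other address space.
 */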
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
	if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
		return NULL;

	return vma;
}