2 * Copyright © 2008-2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
26 * Copyright (c) 2011 The FreeBSD Foundation
27 * All rights reserved.
29 * This software was developed by Konstantin Belousov under sponsorship from
30 * the FreeBSD Foundation.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 #include <machine/md_var.h>
58 #include <drm/drm_vma_manager.h>
59 #include <drm/i915_drm.h>
61 #include "i915_vgpu.h"
62 #include "i915_trace.h"
63 #include "intel_drv.h"
64 #include <linux/shmem_fs.h>
65 #include <linux/slab.h>
66 #include <linux/swap.h>
67 #include <linux/pci.h>
69 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
70 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
71 static __must_check int
72 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
75 i915_gem_object_retire(struct drm_i915_gem_object *obj);
77 static void i915_gem_write_fence(struct drm_device *dev, int reg,
78 struct drm_i915_gem_object *obj);
79 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
80 struct drm_i915_fence_reg *fence,
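/*
 * CPU accesses are coherent with GPU access when the platform shares an LLC
 * with the GPU, or when the object uses a snooped (non-NONE) cache level;
 * only in the remaining cases does the CPU need to clflush.
 */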
83 static bool cpu_cache_is_coherent(struct drm_device *dev,
84 enum i915_cache_level level)
86 return HAS_LLC(dev) || level != I915_CACHE_NONE;
89 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
91 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
94 return obj->pin_display;
97 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
100 i915_gem_release_mmap(obj);
102 /* As we do not have an associated fence register, we will force
103 * a tiling change if we ever need to acquire one.
105 obj->fence_dirty = false;
106 obj->fence_reg = I915_FENCE_REG_NONE;
109 /* some bookkeeping */
110 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
113 spin_lock(&dev_priv->mm.object_stat_lock);
114 dev_priv->mm.object_count++;
115 dev_priv->mm.object_memory += size;
116 spin_unlock(&dev_priv->mm.object_stat_lock);
119 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
122 spin_lock(&dev_priv->mm.object_stat_lock);
123 dev_priv->mm.object_count--;
124 dev_priv->mm.object_memory -= size;
125 spin_unlock(&dev_priv->mm.object_stat_lock);
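/*
 * Block until any pending GPU reset has completed (or the GPU has been
 * declared terminally wedged), so callers do not race with the reset handler.
 */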
129 i915_gem_wait_for_error(struct i915_gpu_error *error)
133 #define EXIT_COND (!i915_reset_in_progress(error) || \
134 i915_terminally_wedged(error))
139 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
140 * userspace. If it takes that long, something really bad is going on and
141 * we should simply try to bail out and fail as gracefully as possible.
143 ret = wait_event_interruptible_timeout(error->reset_queue,
147 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
149 } else if (ret < 0) {
157 int i915_mutex_lock_interruptible(struct drm_device *dev)
159 struct drm_i915_private *dev_priv = dev->dev_private;
162 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
166 ret = mutex_lock_interruptible(&dev->struct_mutex);
170 WARN_ON(i915_verify_lists(dev));
175 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
176 struct drm_file *file)
178 struct drm_i915_private *dev_priv = dev->dev_private;
179 struct drm_i915_gem_get_aperture *args = data;
180 struct drm_i915_gem_object *obj;
184 mutex_lock(&dev->struct_mutex);
185 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
186 if (i915_gem_obj_is_pinned(obj))
187 pinned += i915_gem_obj_ggtt_size(obj);
188 mutex_unlock(&dev->struct_mutex);
190 args->aper_size = dev_priv->gtt.base.total;
191 args->aper_available_size = args->aper_size - pinned;
198 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
200 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
201 char *vaddr = obj->phys_handle->vaddr;
203 struct scatterlist *sg;
206 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
209 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
210 struct vm_page *page;
213 page = shmem_read_mapping_page(mapping, i);
215 return PTR_ERR(page);
217 src = kmap_atomic(page);
218 memcpy(vaddr, src, PAGE_SIZE);
219 drm_clflush_virt_range(vaddr, PAGE_SIZE);
222 page_cache_release(page);
226 i915_gem_chipset_flush(obj->base.dev);
228 st = kmalloc(sizeof(*st), GFP_KERNEL);
232 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
239 sg->length = obj->base.size;
241 sg_dma_address(sg) = obj->phys_handle->busaddr;
242 sg_dma_len(sg) = obj->base.size;
245 obj->has_dma_mapping = true;
250 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
254 BUG_ON(obj->madv == __I915_MADV_PURGED);
256 ret = i915_gem_object_set_to_cpu_domain(obj, true);
258 /* In the event of a disaster, abandon all caches and
261 WARN_ON(ret != -EIO);
262 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
265 if (obj->madv == I915_MADV_DONTNEED)
269 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
270 char *vaddr = obj->phys_handle->vaddr;
273 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
277 page = shmem_read_mapping_page(mapping, i);
281 dst = kmap_atomic(page);
282 drm_clflush_virt_range(vaddr, PAGE_SIZE);
283 memcpy(dst, vaddr, PAGE_SIZE);
286 set_page_dirty(page);
287 if (obj->madv == I915_MADV_WILLNEED)
288 mark_page_accessed(page);
289 page_cache_release(page);
295 sg_free_table(obj->pages);
298 obj->has_dma_mapping = false;
302 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
304 drm_pci_free(obj->base.dev, obj->phys_handle);
307 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
308 .get_pages = i915_gem_object_get_pages_phys,
309 .put_pages = i915_gem_object_put_pages_phys,
310 .release = i915_gem_object_release_phys,
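/*
 * Unbind every VMA and drop the object's backing pages. A temporary GEM
 * reference is held so the object cannot disappear while its pages are
 * being released.
 */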
315 drop_pages(struct drm_i915_gem_object *obj)
317 struct i915_vma *vma, *next;
320 drm_gem_object_reference(&obj->base);
321 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
322 if (i915_vma_unbind(vma))
325 ret = i915_gem_object_put_pages(obj);
326 drm_gem_object_unreference(&obj->base);
332 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
335 drm_dma_handle_t *phys;
338 if (obj->phys_handle) {
339 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
345 if (obj->madv != I915_MADV_WILLNEED)
349 if (obj->base.filp == NULL)
353 ret = drop_pages(obj);
357 /* create a new object */
358 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
362 obj->phys_handle = phys;
364 obj->ops = &i915_gem_phys_ops;
367 return i915_gem_object_get_pages(obj);
371 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
372 struct drm_i915_gem_pwrite *args,
373 struct drm_file *file_priv)
375 struct drm_device *dev = obj->base.dev;
376 void *vaddr = (char *)obj->phys_handle->vaddr + args->offset;
377 char __user *user_data = to_user_ptr(args->data_ptr);
380 /* We manually control the domain here and pretend that it
381 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
383 ret = i915_gem_object_wait_rendering(obj, false);
387 intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
388 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
389 unsigned long unwritten;
391 /* The physical object once assigned is fixed for the lifetime
392 * of the obj, so we can safely drop the lock and continue
395 mutex_unlock(&dev->struct_mutex);
396 unwritten = copy_from_user(vaddr, user_data, args->size);
397 mutex_lock(&dev->struct_mutex);
404 drm_clflush_virt_range(vaddr, args->size);
405 i915_gem_chipset_flush(dev);
408 intel_fb_obj_flush(obj, false);
412 void *i915_gem_object_alloc(struct drm_device *dev)
414 return kmalloc(sizeof(struct drm_i915_gem_object),
415 M_DRM, M_WAITOK | M_ZERO);
418 void i915_gem_object_free(struct drm_i915_gem_object *obj)
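/*
 * Common object-creation helper: round the size up to a whole page, allocate
 * a shmem-backed GEM object and return a new handle for it (the handle now
 * holds the only reference).
 */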
424 i915_gem_create(struct drm_file *file,
425 struct drm_device *dev,
429 struct drm_i915_gem_object *obj;
433 size = roundup(size, PAGE_SIZE);
437 /* Allocate the new object */
438 obj = i915_gem_alloc_object(dev, size);
442 ret = drm_gem_handle_create(file, &obj->base, &handle);
443 /* drop reference from allocate - handle holds it now */
444 drm_gem_object_unreference_unlocked(&obj->base);
453 i915_gem_dumb_create(struct drm_file *file,
454 struct drm_device *dev,
455 struct drm_mode_create_dumb *args)
457 /* have to work out size/pitch and return them */
458 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
459 args->size = args->pitch * args->height;
460 return i915_gem_create(file, dev,
461 args->size, &args->handle);
465 * Creates a new mm object and returns a handle to it.
468 i915_gem_create_ioctl(struct drm_device *dev, void *data,
469 struct drm_file *file)
471 struct drm_i915_gem_create *args = data;
473 return i915_gem_create(file, dev,
474 args->size, &args->handle);
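/*
 * Copy out of a bit-17-swizzled GPU buffer: walk the source one 64-byte
 * cacheline at a time and toggle bit 6 of the GPU offset (gpu_offset ^ 64),
 * which undoes the swizzle the hardware applies based on physical address
 * bit 17. __copy_from_user_swizzled below is the write-side counterpart.
 */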
478 __copy_to_user_swizzled(char __user *cpu_vaddr,
479 const char *gpu_vaddr, int gpu_offset,
482 int ret, cpu_offset = 0;
485 int cacheline_end = ALIGN(gpu_offset + 1, 64);
486 int this_length = min(cacheline_end - gpu_offset, length);
487 int swizzled_gpu_offset = gpu_offset ^ 64;
489 ret = __copy_to_user(cpu_vaddr + cpu_offset,
490 gpu_vaddr + swizzled_gpu_offset,
495 cpu_offset += this_length;
496 gpu_offset += this_length;
497 length -= this_length;
504 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
505 const char __user *cpu_vaddr,
508 int ret, cpu_offset = 0;
511 int cacheline_end = ALIGN(gpu_offset + 1, 64);
512 int this_length = min(cacheline_end - gpu_offset, length);
513 int swizzled_gpu_offset = gpu_offset ^ 64;
515 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
516 cpu_vaddr + cpu_offset,
521 cpu_offset += this_length;
522 gpu_offset += this_length;
523 length -= this_length;
530 * Pins the specified object's pages and synchronizes the object with
531 * GPU accesses. Sets needs_clflush to non-zero if the caller should
532 * flush the object from the CPU cache.
534 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
546 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
547 /* If we're not in the cpu read domain, set ourself into the gtt
548 * read domain and manually flush cachelines (if required). This
549 * optimizes for the case when the gpu will dirty the data
550 * anyway again before the next pread happens. */
551 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
553 ret = i915_gem_object_wait_rendering(obj, true);
557 i915_gem_object_retire(obj);
560 ret = i915_gem_object_get_pages(obj);
564 i915_gem_object_pin_pages(obj);
569 /* Per-page copy function for the shmem pread fastpath.
570 * Flushes invalid cachelines before reading the target if
571 * needs_clflush is set. */
573 shmem_pread_fast(struct vm_page *page, int shmem_page_offset, int page_length,
574 char __user *user_data,
575 bool page_do_bit17_swizzling, bool needs_clflush)
580 if (unlikely(page_do_bit17_swizzling))
583 vaddr = kmap_atomic(page);
585 drm_clflush_virt_range(vaddr + shmem_page_offset,
587 ret = __copy_to_user_inatomic(user_data,
588 vaddr + shmem_page_offset,
590 kunmap_atomic(vaddr);
592 return ret ? -EFAULT : 0;
596 shmem_clflush_swizzled_range(char *addr, unsigned long length,
599 if (unlikely(swizzled)) {
600 unsigned long start = (unsigned long) addr;
601 unsigned long end = (unsigned long) addr + length;
603 /* For swizzling simply ensure that we always flush both
604 * channels. Lame, but simple and it works. Swizzled
605 * pwrite/pread is far from a hotpath - current userspace
606 * doesn't use it at all. */
607 start = round_down(start, 128);
608 end = round_up(end, 128);
610 drm_clflush_virt_range((void *)start, end - start);
612 drm_clflush_virt_range(addr, length);
617 /* The only difference from the fast-path function is that this can handle bit17
618 * and uses non-atomic copy and kmap functions. */
620 shmem_pread_slow(struct vm_page *page, int shmem_page_offset, int page_length,
621 char __user *user_data,
622 bool page_do_bit17_swizzling, bool needs_clflush)
629 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
631 page_do_bit17_swizzling);
633 if (page_do_bit17_swizzling)
634 ret = __copy_to_user_swizzled(user_data,
635 vaddr, shmem_page_offset,
638 ret = __copy_to_user(user_data,
639 vaddr + shmem_page_offset,
643 return ret ? -EFAULT : 0;
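/*
 * Per-page pread loop: try the atomic kmap fast path first; on failure drop
 * struct_mutex, prefault the user buffer and retry with the non-atomic slow
 * path before reacquiring the lock.
 */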
647 i915_gem_shmem_pread(struct drm_device *dev,
648 struct drm_i915_gem_object *obj,
649 struct drm_i915_gem_pread *args,
650 struct drm_file *file)
652 char __user *user_data;
655 int shmem_page_offset, page_length, ret = 0;
656 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
658 int needs_clflush = 0;
661 user_data = to_user_ptr(args->data_ptr);
664 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
666 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
670 offset = args->offset;
672 for (i = 0; i < (obj->base.size >> PAGE_SHIFT); i++) {
673 struct vm_page *page = obj->pages[i];
678 /* Operation in this page
680 * shmem_page_offset = offset within page in shmem file
681 * page_length = bytes to copy for this page
683 shmem_page_offset = offset_in_page(offset);
684 page_length = remain;
685 if ((shmem_page_offset + page_length) > PAGE_SIZE)
686 page_length = PAGE_SIZE - shmem_page_offset;
688 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
689 (page_to_phys(page) & (1 << 17)) != 0;
691 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
692 user_data, page_do_bit17_swizzling,
697 mutex_unlock(&dev->struct_mutex);
699 if (likely(!i915.prefault_disable) && !prefaulted) {
700 ret = fault_in_multipages_writeable(user_data, remain);
701 /* Userspace is tricking us, but we've already clobbered
702 * its pages with the prefault and promised to write the
703 * data up to the first fault. Hence ignore any errors
704 * and just continue. */
709 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
710 user_data, page_do_bit17_swizzling,
713 mutex_lock(&dev->struct_mutex);
719 remain -= page_length;
720 user_data += page_length;
721 offset += page_length;
725 i915_gem_object_unpin_pages(obj);
731 * Reads data from the object referenced by handle.
733 * On error, the contents of *data are undefined.
736 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
737 struct drm_file *file)
739 struct drm_i915_gem_pread *args = data;
740 struct drm_i915_gem_object *obj;
746 ret = i915_mutex_lock_interruptible(dev);
750 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
751 if (&obj->base == NULL) {
756 /* Bounds check source. */
757 if (args->offset > obj->base.size ||
758 args->size > obj->base.size - args->offset) {
763 trace_i915_gem_object_pread(obj, args->offset, args->size);
765 ret = i915_gem_shmem_pread(dev, obj, args, file);
768 drm_gem_object_unreference(&obj->base);
770 mutex_unlock(&dev->struct_mutex);
774 /* This is the fast write path which cannot handle
775 * page faults in the source data
779 fast_user_write(struct io_mapping *mapping,
780 loff_t page_base, int page_offset,
781 char __user *user_data,
784 void __iomem *vaddr_atomic;
786 unsigned long unwritten;
788 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
789 /* We can use the cpu mem copy function because this is X86. */
790 vaddr = (char __force*)vaddr_atomic + page_offset;
791 unwritten = __copy_from_user_inatomic_nocache(vaddr,
793 io_mapping_unmap_atomic(vaddr_atomic);
798 * This is the fast pwrite path, where we copy the data directly from the
799 * user into the GTT, uncached.
802 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
803 struct drm_i915_gem_object *obj,
804 struct drm_i915_gem_pwrite *args,
805 struct drm_file *file)
807 struct drm_i915_private *dev_priv = dev->dev_private;
809 loff_t offset, page_base;
810 char __user *user_data;
811 int page_offset, page_length, ret;
813 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
817 ret = i915_gem_object_set_to_gtt_domain(obj, true);
821 ret = i915_gem_object_put_fence(obj);
825 user_data = to_user_ptr(args->data_ptr);
828 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
830 intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
833 /* Operation in this page
835 * page_base = page offset within aperture
836 * page_offset = offset within page
837 * page_length = bytes to copy for this page
839 page_base = offset & ~PAGE_MASK;
840 page_offset = offset_in_page(offset);
841 page_length = remain;
842 if ((page_offset + remain) > PAGE_SIZE)
843 page_length = PAGE_SIZE - page_offset;
845 /* If we get a fault while copying data, then (presumably) our
846 * source page isn't available. Return the error and we'll
847 * retry in the slow path.
849 if (fast_user_write(dev_priv->gtt.mappable, page_base,
850 page_offset, user_data, page_length)) {
855 remain -= page_length;
856 user_data += page_length;
857 offset += page_length;
861 intel_fb_obj_flush(obj, false);
863 i915_gem_object_ggtt_unpin(obj);
868 /* Per-page copy function for the shmem pwrite fastpath.
869 * Flushes invalid cachelines before writing to the target if
870 * needs_clflush_before is set and flushes out any written cachelines after
871 * writing if needs_clflush is set. */
873 shmem_pwrite_fast(struct vm_page *page, int shmem_page_offset, int page_length,
874 char __user *user_data,
875 bool page_do_bit17_swizzling,
876 bool needs_clflush_before,
877 bool needs_clflush_after)
882 if (unlikely(page_do_bit17_swizzling))
885 vaddr = kmap_atomic(page);
886 if (needs_clflush_before)
887 drm_clflush_virt_range(vaddr + shmem_page_offset,
889 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
890 user_data, page_length);
891 if (needs_clflush_after)
892 drm_clflush_virt_range(vaddr + shmem_page_offset,
894 kunmap_atomic(vaddr);
896 return ret ? -EFAULT : 0;
899 /* The only difference from the fast-path function is that this can handle bit17
900 * and uses non-atomic copy and kmap functions. */
902 shmem_pwrite_slow(struct vm_page *page, int shmem_page_offset, int page_length,
903 char __user *user_data,
904 bool page_do_bit17_swizzling,
905 bool needs_clflush_before,
906 bool needs_clflush_after)
912 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
913 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
915 page_do_bit17_swizzling);
916 if (page_do_bit17_swizzling)
917 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
921 ret = __copy_from_user(vaddr + shmem_page_offset,
924 if (needs_clflush_after)
925 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
927 page_do_bit17_swizzling);
930 return ret ? -EFAULT : 0;
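/*
 * Per-page pwrite loop: clflush stale cachelines before a partial cacheline
 * write and flush written lines afterwards when the object is not
 * CPU-coherent; fall back to the non-atomic slow path (dropping struct_mutex)
 * if the atomic copy faults.
 */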
934 i915_gem_shmem_pwrite(struct drm_device *dev,
935 struct drm_i915_gem_object *obj,
936 struct drm_i915_gem_pwrite *args,
937 struct drm_file *file)
941 char __user *user_data;
942 int shmem_page_offset, page_length, ret = 0;
943 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
944 int hit_slowpath = 0;
945 int needs_clflush_after = 0;
946 int needs_clflush_before = 0;
949 user_data = to_user_ptr(args->data_ptr);
952 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
954 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
955 /* If we're not in the cpu write domain, set ourself into the gtt
956 * write domain and manually flush cachelines (if required). This
957 * optimizes for the case when the gpu will use the data
958 * right away and we therefore have to clflush anyway. */
959 needs_clflush_after = cpu_write_needs_clflush(obj);
960 ret = i915_gem_object_wait_rendering(obj, false);
964 i915_gem_object_retire(obj);
966 /* Same trick applies to invalidate partially written cachelines read
968 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
969 needs_clflush_before =
970 !cpu_cache_is_coherent(dev, obj->cache_level);
972 ret = i915_gem_object_get_pages(obj);
976 intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
978 i915_gem_object_pin_pages(obj);
980 offset = args->offset;
983 VM_OBJECT_LOCK(obj->base.vm_obj);
984 vm_object_pip_add(obj->base.vm_obj, 1);
985 for (i = 0; i < (obj->base.size >> PAGE_SHIFT); i++) {
986 struct vm_page *page = obj->pages[i];
987 int partial_cacheline_write;
989 if (i < offset >> PAGE_SHIFT)
995 /* Operation in this page
997 * shmem_page_offset = offset within page in shmem file
998 * page_length = bytes to copy for this page
1000 shmem_page_offset = offset_in_page(offset);
1002 page_length = remain;
1003 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1004 page_length = PAGE_SIZE - shmem_page_offset;
1006 /* If we don't overwrite a cacheline completely we need to be
1007 * careful to have up-to-date data by first clflushing. Don't
1008 * overcomplicate things and flush the entire range being written. */
1009 partial_cacheline_write = needs_clflush_before &&
1010 ((shmem_page_offset | page_length)
1011 & (cpu_clflush_line_size - 1));
1013 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1014 (page_to_phys(page) & (1 << 17)) != 0;
1016 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1017 user_data, page_do_bit17_swizzling,
1018 partial_cacheline_write,
1019 needs_clflush_after);
1024 mutex_unlock(&dev->struct_mutex);
1025 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1026 user_data, page_do_bit17_swizzling,
1027 partial_cacheline_write,
1028 needs_clflush_after);
1030 mutex_lock(&dev->struct_mutex);
1036 remain -= page_length;
1037 user_data += page_length;
1038 offset += page_length;
1040 vm_object_pip_wakeup(obj->base.vm_obj);
1041 VM_OBJECT_UNLOCK(obj->base.vm_obj);
1044 i915_gem_object_unpin_pages(obj);
1048 * Fixup: Flush cpu caches in case we didn't flush the dirty
1049 * cachelines in-line while writing and the object moved
1050 * out of the cpu write domain while we've dropped the lock.
1052 if (!needs_clflush_after &&
1053 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1054 if (i915_gem_clflush_object(obj, obj->pin_display))
1055 i915_gem_chipset_flush(dev);
1059 if (needs_clflush_after)
1060 i915_gem_chipset_flush(dev);
1062 intel_fb_obj_flush(obj, false);
1067 * Writes data to the object referenced by handle.
1069 * On error, the contents of the buffer that were to be modified are undefined.
1072 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1073 struct drm_file *file)
1075 struct drm_i915_private *dev_priv = dev->dev_private;
1076 struct drm_i915_gem_pwrite *args = data;
1077 struct drm_i915_gem_object *obj;
1080 if (args->size == 0)
1083 if (likely(!i915.prefault_disable)) {
1084 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
1090 intel_runtime_pm_get(dev_priv);
1092 ret = i915_mutex_lock_interruptible(dev);
1096 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1097 if (&obj->base == NULL) {
1102 /* Bounds check destination. */
1103 if (args->offset > obj->base.size ||
1104 args->size > obj->base.size - args->offset) {
1109 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1112 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1113 * it would end up going through the fenced access, and we'll get
1114 * different detiling behavior between reading and writing.
1115 * pread/pwrite currently are reading and writing from the CPU
1116 * perspective, requiring manual detiling by the client.
1119 if (obj->tiling_mode == I915_TILING_NONE &&
1120 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
1121 cpu_write_needs_clflush(obj)) {
1122 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1123 /* Note that the gtt paths might fail with non-page-backed user
1124 * pointers (e.g. gtt mappings when moving data between
1125 * textures). Fall back to the shmem path in that case. */
1128 if (ret == -EFAULT || ret == -ENOSPC) {
1129 if (obj->phys_handle)
1130 ret = i915_gem_phys_pwrite(obj, args, file);
1132 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1136 drm_gem_object_unreference(&obj->base);
1138 mutex_unlock(&dev->struct_mutex);
1140 intel_runtime_pm_put(dev_priv);
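/*
 * Translate the current GPU error state into an errno: -EIO when the GPU is
 * terminally wedged or the caller cannot handle -EAGAIN, -EAGAIN while a
 * reset is still in progress, 0 otherwise.
 */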
1146 i915_gem_check_wedge(struct i915_gpu_error *error,
1149 if (i915_reset_in_progress(error)) {
1150 /* Non-interruptible callers can't handle -EAGAIN, hence return
1151 * -EIO unconditionally for these. */
1155 /* Recovery complete, but the reset failed ... */
1156 if (i915_terminally_wedged(error))
1160 * Check if GPU Reset is in progress - we need intel_ring_begin
1161 * to work properly to reinit the hw state while the gpu is
1162 * still marked as reset-in-progress. Handle this with a flag.
1164 if (!error->reload_in_reset)
1172 * Compare arbitrary request against outstanding lazy request. Emit on match.
1175 i915_gem_check_olr(struct drm_i915_gem_request *req)
1179 WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
1182 if (req == req->ring->outstanding_lazy_request)
1183 ret = i915_add_request(req->ring);
1189 static void fake_irq(unsigned long data)
1191 wake_up_process((struct task_struct *)data);
1194 static bool missed_irq(struct drm_i915_private *dev_priv,
1195 struct intel_engine_cs *ring)
1197 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1201 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1203 if (file_priv == NULL)
1206 return !atomic_xchg(&file_priv->rps_wait_boost, true);
1210 * __i915_wait_request - wait until execution of request has finished
1212 * @reset_counter: reset sequence associated with the given request
1213 * @interruptible: do an interruptible wait (normally yes)
1214 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1216 * Note: It is of utmost importance that the passed-in seqno and reset_counter
1217 * values have been read by the caller in an SMP-safe manner. Where read-side
1218 * locks are involved, it is sufficient to read the reset_counter before
1219 * unlocking the lock that protects the seqno. For lockless tricks, the
1220 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1223 * Returns 0 if the request was found within the allotted time. Else returns the
1224 * errno with remaining time filled in timeout argument.
1226 int __i915_wait_request(struct drm_i915_gem_request *req,
1227 unsigned reset_counter,
1230 struct drm_i915_file_private *file_priv)
1232 struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
1233 struct drm_device *dev = ring->dev;
1234 struct drm_i915_private *dev_priv = dev->dev_private;
1235 const bool irq_test_in_progress =
1236 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1237 unsigned long timeout_expire;
1239 bool wait_forever = true;
1243 WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1245 if (i915_gem_request_completed(req, true))
1248 if (timeout != NULL)
1249 wait_forever = false;
1251 timeout_expire = timeout ?
1252 jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
1254 if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
1255 gen6_rps_boost(dev_priv);
1257 mod_delayed_work(dev_priv->wq,
1258 &file_priv->mm.idle_work,
1259 msecs_to_jiffies(100));
1262 if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
1265 /* Record current time in case interrupted by signal, or wedged */
1266 trace_i915_gem_request_wait_begin(req);
1267 before = ktime_get_raw_ns();
1270 (i915_seqno_passed(ring->get_seqno(ring, false), i915_gem_request_get_seqno(req)) || \
1271 i915_reset_in_progress(&dev_priv->gpu_error) || \
1272 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1275 end = wait_event_interruptible_timeout(ring->irq_queue,
1279 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1282 /* We need to check whether any gpu reset happened in between
1283 * the caller grabbing the seqno and now ... */
1284 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1287 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1289 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1292 } while (end == 0 && wait_forever);
1294 now = ktime_get_raw_ns();
1295 trace_i915_gem_request_wait_end(req);
1297 ring->irq_put(ring);
1301 s64 tres = *timeout - (now - before);
1303 *timeout = tres < 0 ? 0 : tres;
1308 case -EAGAIN: /* Wedged */
1309 case -ERESTARTSYS: /* Signal */
1311 case 0: /* Timeout */
1312 return -ETIMEDOUT; /* -ETIME on Linux */
1313 default: /* Completed */
1314 WARN_ON(end < 0); /* We're not aware of other errors */
1320 * Waits for a request to be signaled, and cleans up the
1321 * request and object lists appropriately for that event.
1324 i915_wait_request(struct drm_i915_gem_request *req)
1326 struct drm_device *dev;
1327 struct drm_i915_private *dev_priv;
1329 unsigned reset_counter;
1332 BUG_ON(req == NULL);
1334 dev = req->ring->dev;
1335 dev_priv = dev->dev_private;
1336 interruptible = dev_priv->mm.interruptible;
1338 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1340 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1344 ret = i915_gem_check_olr(req);
1348 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1349 i915_gem_request_reference(req);
1350 ret = __i915_wait_request(req, reset_counter,
1351 interruptible, NULL, NULL);
1352 i915_gem_request_unreference(req);
1357 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
1362 /* Manually manage the write flush as we may have not yet
1363 * retired the buffer.
1365 * Note that the last_write_req is always the earlier of
1366 * the two (read/write) requests, so if we have successfully waited,
1367 * we know we have passed the last write.
1369 i915_gem_request_assign(&obj->last_write_req, NULL);
1375 * Ensures that all rendering to the object has completed and the object is
1376 * safe to unbind from the GTT or access from the CPU.
1378 static __must_check int
1379 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1382 struct drm_i915_gem_request *req;
1385 req = readonly ? obj->last_write_req : obj->last_read_req;
1389 ret = i915_wait_request(req);
1393 return i915_gem_object_wait_rendering__tail(obj);
1396 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1397 * as the object state may change during this call.
1399 static __must_check int
1400 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1401 struct drm_i915_file_private *file_priv,
1404 struct drm_i915_gem_request *req;
1405 struct drm_device *dev = obj->base.dev;
1406 struct drm_i915_private *dev_priv = dev->dev_private;
1407 unsigned reset_counter;
1410 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1411 BUG_ON(!dev_priv->mm.interruptible);
1413 req = readonly ? obj->last_write_req : obj->last_read_req;
1417 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1421 ret = i915_gem_check_olr(req);
1425 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1426 i915_gem_request_reference(req);
1427 mutex_unlock(&dev->struct_mutex);
1428 ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
1429 mutex_lock(&dev->struct_mutex);
1430 i915_gem_request_unreference(req);
1434 return i915_gem_object_wait_rendering__tail(obj);
1438 * Called when user space prepares to use an object with the CPU, either
1439 * through the mmap ioctl's mapping or a GTT mapping.
1442 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1443 struct drm_file *file)
1445 struct drm_i915_gem_set_domain *args = data;
1446 struct drm_i915_gem_object *obj;
1447 uint32_t read_domains = args->read_domains;
1448 uint32_t write_domain = args->write_domain;
1451 /* Only handle setting domains to types used by the CPU. */
1452 if (write_domain & I915_GEM_GPU_DOMAINS)
1455 if (read_domains & I915_GEM_GPU_DOMAINS)
1458 /* Having something in the write domain implies it's in the read
1459 * domain, and only that read domain. Enforce that in the request.
1461 if (write_domain != 0 && read_domains != write_domain)
1464 ret = i915_mutex_lock_interruptible(dev);
1468 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1469 if (&obj->base == NULL) {
1474 /* Try to flush the object off the GPU without holding the lock.
1475 * We will repeat the flush holding the lock in the normal manner
1476 * to catch cases where we are gazumped.
1478 ret = i915_gem_object_wait_rendering__nonblocking(obj,
1484 if (read_domains & I915_GEM_DOMAIN_GTT)
1485 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1487 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1490 drm_gem_object_unreference(&obj->base);
1492 mutex_unlock(&dev->struct_mutex);
1497 * Called when user space has done writes to this buffer
1500 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1501 struct drm_file *file)
1503 struct drm_i915_gem_sw_finish *args = data;
1504 struct drm_i915_gem_object *obj;
1507 ret = i915_mutex_lock_interruptible(dev);
1511 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1512 if (&obj->base == NULL) {
1517 /* Pinned buffers may be scanout, so flush the cache */
1518 if (obj->pin_display)
1519 i915_gem_object_flush_cpu_write_domain(obj);
1521 drm_gem_object_unreference(&obj->base);
1523 mutex_unlock(&dev->struct_mutex);
1528 * Maps the contents of an object, returning the address it is mapped
1531 * While the mapping holds a reference on the contents of the object, it doesn't
1532 * imply a ref on the object itself.
1536 * DRM driver writers who look at this function as an example for how to do GEM
1537 * mmap support, please don't implement mmap support like here. The modern way
1538 * to implement DRM mmap support is with an mmap offset ioctl (like
1539 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1540 * That way debug tooling like valgrind will understand what's going on; hiding
1541 * the mmap call in a driver-private ioctl will break that. The i915 driver only
1542 * does cpu mmaps this way because we didn't know better.
1545 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1546 struct drm_file *file)
1548 struct drm_i915_gem_mmap *args = data;
1549 struct drm_gem_object *obj;
1551 struct proc *p = curproc;
1552 vm_map_t map = &p->p_vmspace->vm_map;
1556 obj = drm_gem_object_lookup(dev, file, args->handle);
1560 if (args->size == 0)
1563 size = round_page(args->size);
1564 if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
1570 * Call hint to ensure that NULL is not returned as a valid address
1571 * and to reduce vm_map traversals. XXX causes instability, use a
1572 * fixed low address as the start point instead to avoid the NULL
1578 * Use 256KB alignment. It is unclear why this matters for a
1579 * virtual address but it appears to fix a number of application/X
1580 * crashes and kms console switching is much faster.
1582 vm_object_hold(obj->vm_obj);
1583 vm_object_reference_locked(obj->vm_obj);
1584 vm_object_drop(obj->vm_obj);
1586 rv = vm_map_find(map, obj->vm_obj, NULL,
1587 args->offset, &addr, args->size,
1588 256 * 1024, /* align */
1590 VM_MAPTYPE_NORMAL, /* maptype */
1591 VM_PROT_READ | VM_PROT_WRITE, /* prot */
1592 VM_PROT_READ | VM_PROT_WRITE, /* max */
1593 MAP_SHARED /* cow */);
1594 if (rv != KERN_SUCCESS) {
1595 vm_object_deallocate(obj->vm_obj);
1596 error = -vm_mmap_to_errno(rv);
1598 args->addr_ptr = (uint64_t)addr;
1601 drm_gem_object_unreference(obj);
1606 * i915_gem_fault - fault a page into the GTT
1608 * vm_obj is locked on entry and expected to be locked on return.
1610 * The vm_pager has placemarked the object with an anonymous memory page
1611 * which we must replace atomically to avoid races against concurrent faults
1612 * on the same page. XXX we currently are unable to do this atomically.
1614 * If we are to return an error we should not touch the anonymous page,
1615 * the caller will deallocate it.
1617 * XXX Most GEM calls appear to be interruptible, but we can't hard loop
1618 * in that case. Release all resources and wait 1 tick before retrying.
1619 * This is a huge problem which needs to be fixed by getting rid of most
1620 * of the interruptibility. The Linux code does not retry but does appear
1621 * to have some sort of mechanism (VM_FAULT_NOPAGE ?) for the higher level
1622 * to be able to retry.
1626 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1627 * from userspace. The fault handler takes care of binding the object to
1628 * the GTT (if needed), allocating and programming a fence register (again,
1629 * only if needed based on whether the old reg is still valid or the object
1630 * is tiled) and inserting a new PTE into the faulting process.
1632 * Note that the faulting process may involve evicting existing objects
1633 * from the GTT and/or fence registers to make room. So performance may
1634 * suffer if the GTT working set is large or there are few fence registers
1637 * vm_obj is locked on entry and expected to be locked on return. The VM
1638 * pager has placed an anonymous memory page at (obj,offset) which we have
1641 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres)
1643 struct drm_i915_gem_object *obj = to_intel_bo(vm_obj->handle);
1644 struct drm_device *dev = obj->base.dev;
1645 struct drm_i915_private *dev_priv = dev->dev_private;
1646 unsigned long page_offset;
1647 vm_page_t m, oldm = NULL;
1650 bool write = !!(prot & VM_PROT_WRITE);
1652 intel_runtime_pm_get(dev_priv);
1654 /* We don't use vmf->pgoff since that has the fake offset */
1655 page_offset = (unsigned long)offset;
1658 ret = i915_mutex_lock_interruptible(dev);
1662 trace_i915_gem_object_fault(obj, page_offset, true, write);
1664 /* Try to flush the object off the GPU first without holding the lock.
1665 * Upon reacquiring the lock, we will perform our sanity checks and then
1666 * repeat the flush holding the lock in the normal manner to catch cases
1667 * where we are gazumped.
1669 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1673 /* Access to snoopable pages through the GTT is incoherent. */
1674 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1679 /* Now bind it into the GTT if needed */
1680 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1684 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1688 ret = i915_gem_object_get_fence(obj);
1693 * START FREEBSD MAGIC
1695 * Add a pip count to avoid destruction and certain other
1696 * complex operations (such as collapses?) while unlocked.
1699 vm_object_pip_add(vm_obj, 1);
1704 * XXX We must currently remove the placeholder page now to avoid
1705 * a deadlock against a concurrent i915_gem_release_mmap().
1706 * Otherwise concurrent operation will block on the busy page
1707 * while holding locks which we need to obtain.
1709 if (*mres != NULL) {
1711 if ((oldm->flags & PG_BUSY) == 0)
1712 kprintf("i915_gem_fault: Page was not busy\n");
1714 vm_page_remove(oldm);
1724 * Since the object lock was dropped, another thread might have
1725 * faulted on the same GTT address and instantiated the mapping.
1728 m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
1731 * Try to busy the page, retry on failure (non-zero ret).
1733 if (vm_page_busy_try(m, false)) {
1734 kprintf("i915_gem_fault: PG_BUSY\n");
1744 obj->fault_mappable = true;
1746 m = vm_phys_fictitious_to_vm_page(dev_priv->gtt.mappable_base +
1747 i915_gem_obj_ggtt_offset(obj) +
1753 KASSERT((m->flags & PG_FICTITIOUS) != 0, ("not fictitious %p", m));
1754 KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
1757 * Try to busy the page. Fails on non-zero return.
1759 if (vm_page_busy_try(m, false)) {
1760 kprintf("i915_gem_fault: PG_BUSY(2)\n");
1764 m->valid = VM_PAGE_BITS_ALL;
1767 * Finally, remap it using the new GTT offset.
1769 * (object expected to be in a locked state)
1771 vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
1775 i915_gem_object_ggtt_unpin(obj);
1776 mutex_unlock(&dev->struct_mutex);
1781 * ALTERNATIVE ERROR RETURN.
1783 * OBJECT EXPECTED TO BE LOCKED.
1786 i915_gem_object_ggtt_unpin(obj);
1788 mutex_unlock(&dev->struct_mutex);
1793 * We eat errors when the gpu is terminally wedged to avoid
1794 * userspace unduly crashing (gl has no provisions for mmaps to
1795 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1796 * and so needs to be reported.
1798 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1799 // ret = VM_FAULT_SIGBUS;
1805 * EAGAIN means the gpu is hung and we'll wait for the error
1806 * handler to reset everything when re-faulting in
1807 * i915_mutex_lock_interruptible.
1812 VM_OBJECT_UNLOCK(vm_obj);
1814 tsleep(&dummy, 0, "delay", 1); /* XXX */
1815 VM_OBJECT_LOCK(vm_obj);
1818 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1819 ret = VM_PAGER_ERROR;
1827 vm_object_pip_wakeup(vm_obj);
1829 intel_runtime_pm_put(dev_priv);
1834 * i915_gem_release_mmap - remove physical page mappings
1835 * @obj: obj in question
1837 * Preserve the reservation of the mmapping with the DRM core code, but
1838 * relinquish ownership of the pages back to the system.
1840 * It is vital that we remove the page mapping if we have mapped a tiled
1841 * object through the GTT and then lose the fence register due to
1842 * resource pressure. Similarly if the object has been moved out of the
1843 * aperture, then pages mapped into userspace must be revoked. Removing the
1844 * mapping will then trigger a page fault on the next user access, allowing
1845 * fixup by i915_gem_fault().
1848 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1854 if (!obj->fault_mappable)
1857 devobj = cdev_pager_lookup(obj);
1858 if (devobj != NULL) {
1859 page_count = OFF_TO_IDX(obj->base.size);
1861 VM_OBJECT_LOCK(devobj);
1862 for (i = 0; i < page_count; i++) {
1863 m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm");
1866 cdev_pager_free_page(devobj, m);
1868 VM_OBJECT_UNLOCK(devobj);
1869 vm_object_deallocate(devobj);
1872 obj->fault_mappable = false;
1876 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1878 struct drm_i915_gem_object *obj;
1880 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1881 i915_gem_release_mmap(obj);
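/*
 * Size of the GTT fence region needed for an object: gen4+ and untiled
 * objects can use the object size directly, while older generations need a
 * power-of-two region (1MB minimum on gen3, 512KB on the remaining older
 * parts).
 */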
1885 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1889 if (INTEL_INFO(dev)->gen >= 4 ||
1890 tiling_mode == I915_TILING_NONE)
1893 /* Previous chips need a power-of-two fence region when tiling */
1894 if (INTEL_INFO(dev)->gen == 3)
1895 gtt_size = 1024*1024;
1897 gtt_size = 512*1024;
1899 while (gtt_size < size)
1906 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1907 * @obj: object to check
1909 * Return the required GTT alignment for an object, taking into account
1910 * potential fence register mapping.
1913 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1914 int tiling_mode, bool fenced)
1917 * Minimum alignment is 4k (GTT page size), but might be greater
1918 * if a fence register is needed for the object.
1920 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1921 tiling_mode == I915_TILING_NONE)
1925 * Previous chips need to be aligned to the size of the smallest
1926 * fence register that can contain the object.
1928 return i915_gem_get_gtt_size(dev, size, tiling_mode);
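/*
 * Reserve a fake mmap offset for the object in the DRM VMA manager. If the
 * mmap space is exhausted, progressively shrink i915's own purgeable and
 * unbound objects and retry before giving up.
 */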
1931 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1933 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1937 if (drm_vma_node_has_offset(&obj->base.vma_node))
1941 dev_priv->mm.shrinker_no_lock_stealing = true;
1943 ret = drm_gem_create_mmap_offset(&obj->base);
1947 /* Badly fragmented mmap space? The only way we can recover
1948 * space is by destroying unwanted objects. We can't randomly release
1949 * mmap_offsets as userspace expects them to be persistent for the
1950 * lifetime of the objects. The closest we can do is to release the
1951 * offsets on purgeable objects by truncating it and marking it purged,
1952 * which prevents userspace from ever using that object again.
1954 i915_gem_shrink(dev_priv,
1955 obj->base.size >> PAGE_SHIFT,
1957 I915_SHRINK_UNBOUND |
1958 I915_SHRINK_PURGEABLE);
1959 ret = drm_gem_create_mmap_offset(&obj->base);
1963 i915_gem_shrink_all(dev_priv);
1964 ret = drm_gem_create_mmap_offset(&obj->base);
1966 dev_priv->mm.shrinker_no_lock_stealing = false;
1971 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1973 drm_gem_free_mmap_offset(&obj->base);
1977 i915_gem_mmap_gtt(struct drm_file *file,
1978 struct drm_device *dev,
1982 struct drm_i915_private *dev_priv = dev->dev_private;
1983 struct drm_i915_gem_object *obj;
1986 ret = i915_mutex_lock_interruptible(dev);
1990 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1991 if (&obj->base == NULL) {
1996 if (obj->base.size > dev_priv->gtt.mappable_end) {
2001 if (obj->madv != I915_MADV_WILLNEED) {
2002 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
2007 ret = i915_gem_object_create_mmap_offset(obj);
2011 *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
2012 DRM_GEM_MAPPING_KEY;
2015 drm_gem_object_unreference(&obj->base);
2017 mutex_unlock(&dev->struct_mutex);
2022 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2024 * @data: GTT mapping ioctl data
2025 * @file: GEM object info
2027 * Simply returns the fake offset to userspace so it can mmap it.
2028 * The mmap call will end up in drm_gem_mmap(), which will set things
2029 * up so we can get faults in the handler above.
2031 * The fault handler will take care of binding the object into the GTT
2032 * (since it may have been evicted to make room for something), allocating
2033 * a fence register, and mapping the appropriate aperture address into
2037 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2038 struct drm_file *file)
2040 struct drm_i915_gem_mmap_gtt *args = data;
2042 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2045 /* Immediately discard the backing storage */
2047 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2051 vm_obj = obj->base.vm_obj;
2052 VM_OBJECT_LOCK(vm_obj);
2053 vm_object_page_remove(vm_obj, 0, 0, false);
2054 VM_OBJECT_UNLOCK(vm_obj);
2056 obj->madv = __I915_MADV_PURGED;
2059 /* Try to discard unwanted pages */
2061 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2064 struct address_space *mapping;
2067 switch (obj->madv) {
2068 case I915_MADV_DONTNEED:
2069 i915_gem_object_truncate(obj);
2070 case __I915_MADV_PURGED:
2075 if (obj->base.filp == NULL)
2078 mapping = file_inode(obj->base.filp)->i_mapping,
2079 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2084 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2086 int page_count = obj->base.size / PAGE_SIZE;
2092 BUG_ON(obj->madv == __I915_MADV_PURGED);
2094 ret = i915_gem_object_set_to_cpu_domain(obj, true);
2096 /* In the event of a disaster, abandon all caches and
2097 * hope for the best.
2099 WARN_ON(ret != -EIO);
2100 i915_gem_clflush_object(obj, true);
2101 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2104 if (i915_gem_object_needs_bit17_swizzle(obj))
2105 i915_gem_object_save_bit_17_swizzle(obj);
2107 if (obj->madv == I915_MADV_DONTNEED)
2110 for (i = 0; i < page_count; i++) {
2111 struct vm_page *page = obj->pages[i];
2114 set_page_dirty(page);
2116 if (obj->madv == I915_MADV_WILLNEED)
2117 mark_page_accessed(page);
2119 vm_page_busy_wait(obj->pages[i], FALSE, "i915gem");
2120 vm_page_unwire(obj->pages[i], 1);
2121 vm_page_wakeup(obj->pages[i]);
2130 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2132 const struct drm_i915_gem_object_ops *ops = obj->ops;
2134 if (obj->pages == NULL)
2137 if (obj->pages_pin_count)
2140 BUG_ON(i915_gem_obj_bound_any(obj));
2142 /* ->put_pages might need to allocate memory for the bit17 swizzle
2143 * array, hence protect them from being reaped by removing them from gtt
2145 list_del(&obj->global_list);
2147 ops->put_pages(obj);
2150 i915_gem_object_invalidate(obj);
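/*
 * Populate obj->pages from the backing VM object: fault in each shmem page,
 * and on allocation failure shrink i915's own buffers (purgeable first, then
 * everything) before retrying; unwire any pages already taken on error.
 */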
2156 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2158 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2159 int page_count, i, j;
2161 struct vm_page *page;
2163 /* Assert that the object is not currently in any GPU domain. As it
2164 * wasn't in the GTT, there shouldn't be any way it could have been in
2167 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2168 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2170 page_count = obj->base.size / PAGE_SIZE;
2171 obj->pages = kmalloc(page_count * sizeof(vm_page_t), M_DRM,
2174 /* Get the list of pages out of our struct file. They'll be pinned
2175 * at this point until we release them.
2177 * Fail silently without starting the shrinker
2179 vm_obj = obj->base.vm_obj;
2180 VM_OBJECT_LOCK(vm_obj);
2181 for (i = 0; i < page_count; i++) {
2182 page = shmem_read_mapping_page(vm_obj, i);
2184 i915_gem_shrink(dev_priv,
2187 I915_SHRINK_UNBOUND |
2188 I915_SHRINK_PURGEABLE);
2189 page = shmem_read_mapping_page(vm_obj, i);
2192 /* We've tried hard to allocate the memory by reaping
2193 * our own buffer, now let the real VM do its job and
2194 * go down in flames if truly OOM.
2197 i915_gem_shrink_all(dev_priv);
2198 page = shmem_read_mapping_page(vm_obj, i);
2202 #ifdef CONFIG_SWIOTLB
2203 if (swiotlb_nr_tbl()) {
2205 sg_set_page(sg, page, PAGE_SIZE, 0);
2210 obj->pages[i] = page;
2212 #ifdef CONFIG_SWIOTLB
2213 if (!swiotlb_nr_tbl())
2215 VM_OBJECT_UNLOCK(vm_obj);
2217 if (i915_gem_object_needs_bit17_swizzle(obj))
2218 i915_gem_object_do_bit_17_swizzle(obj);
2220 if (obj->tiling_mode != I915_TILING_NONE &&
2221 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2222 i915_gem_object_pin_pages(obj);
2227 for (j = 0; j < i; j++) {
2228 page = obj->pages[j];
2229 vm_page_busy_wait(page, FALSE, "i915gem");
2230 vm_page_unwire(page, 0);
2231 vm_page_wakeup(page);
2233 VM_OBJECT_UNLOCK(vm_obj);
2239 /* Ensure that the associated pages are gathered from the backing storage
2240 * and pinned into our object. i915_gem_object_get_pages() may be called
2241 * multiple times before they are released by a single call to
2242 * i915_gem_object_put_pages() - once the pages are no longer referenced
2243 * either as a result of memory pressure (reaping pages under the shrinker)
2244 * or as the object is itself released.
2247 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2249 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2250 const struct drm_i915_gem_object_ops *ops = obj->ops;
2256 if (obj->madv != I915_MADV_WILLNEED) {
2257 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2261 BUG_ON(obj->pages_pin_count);
2263 ret = ops->get_pages(obj);
2267 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
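/*
 * Mark the object active on the given ring: take a GEM reference on the
 * first transition to active, move it onto the ring's active list and record
 * the request that last read (and, where applicable, wrote) it.
 */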
2272 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2273 struct intel_engine_cs *ring)
2275 struct drm_i915_gem_request *req;
2276 struct intel_engine_cs *old_ring;
2278 BUG_ON(ring == NULL);
2280 req = intel_ring_get_request(ring);
2281 old_ring = i915_gem_request_get_ring(obj->last_read_req);
2283 if (old_ring != ring && obj->last_write_req) {
2284 /* Keep the request relative to the current ring */
2285 i915_gem_request_assign(&obj->last_write_req, req);
2288 /* Add a reference if we're newly entering the active list. */
2290 drm_gem_object_reference(&obj->base);
2294 list_move_tail(&obj->ring_list, &ring->active_list);
2296 i915_gem_request_assign(&obj->last_read_req, req);
2299 void i915_vma_move_to_active(struct i915_vma *vma,
2300 struct intel_engine_cs *ring)
2302 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2303 return i915_gem_object_move_to_active(vma->obj, ring);
2307 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2309 struct i915_vma *vma;
2311 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2312 BUG_ON(!obj->active);
2314 list_for_each_entry(vma, &obj->vma_list, vma_link) {
2315 if (!list_empty(&vma->mm_list))
2316 list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
2319 intel_fb_obj_flush(obj, true);
2321 list_del_init(&obj->ring_list);
2323 i915_gem_request_assign(&obj->last_read_req, NULL);
2324 i915_gem_request_assign(&obj->last_write_req, NULL);
2325 obj->base.write_domain = 0;
2327 i915_gem_request_assign(&obj->last_fenced_req, NULL);
2330 drm_gem_object_unreference(&obj->base);
2332 WARN_ON(i915_verify_lists(dev));
2336 i915_gem_object_retire(struct drm_i915_gem_object *obj)
2338 if (obj->last_read_req == NULL)
2341 if (i915_gem_request_completed(obj->last_read_req, true))
2342 i915_gem_object_move_to_inactive(obj);
2346 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2348 struct drm_i915_private *dev_priv = dev->dev_private;
2349 struct intel_engine_cs *ring;
2352 /* Carefully retire all requests without writing to the rings */
2353 for_each_ring(ring, dev_priv, i) {
2354 ret = intel_ring_idle(ring);
2358 i915_gem_retire_requests(dev);
2360 /* Finally reset hw state */
2361 for_each_ring(ring, dev_priv, i) {
2362 intel_ring_init_seqno(ring, seqno);
2364 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2365 ring->semaphore.sync_seqno[j] = 0;
2371 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2373 struct drm_i915_private *dev_priv = dev->dev_private;
2379 /* HWS page needs to be set less than what we
2380 * will inject to ring
2382 ret = i915_gem_init_seqno(dev, seqno - 1);
2386 /* Carefully set the last_seqno value so that wrap
2387 * detection still works
2389 dev_priv->next_seqno = seqno;
2390 dev_priv->last_seqno = seqno - 1;
2391 if (dev_priv->last_seqno == 0)
2392 dev_priv->last_seqno--;
2398 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2400 struct drm_i915_private *dev_priv = dev->dev_private;
2402 /* reserve 0 for non-seqno */
2403 if (dev_priv->next_seqno == 0) {
2404 int ret = i915_gem_init_seqno(dev, 0);
2408 dev_priv->next_seqno = 1;
2411 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
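/*
 * Emit the ring's outstanding lazy request: flush any pending caches, write
 * the request into the ring, record its position for hangcheck, and queue
 * the retire and hangcheck work.
 */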
2415 int __i915_add_request(struct intel_engine_cs *ring,
2416 struct drm_file *file,
2417 struct drm_i915_gem_object *obj)
2419 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2420 struct drm_i915_gem_request *request;
2421 struct intel_ringbuffer *ringbuf;
2425 request = ring->outstanding_lazy_request;
2426 if (WARN_ON(request == NULL))
2429 if (i915.enable_execlists) {
2430 ringbuf = request->ctx->engine[ring->id].ringbuf;
2432 ringbuf = ring->buffer;
2434 request_start = intel_ring_get_tail(ringbuf);
2436 * Emit any outstanding flushes - execbuf can fail to emit the flush
2437 * after having emitted the batchbuffer command. Hence we need to fix
2438 * things up similar to emitting the lazy request. The difference here
2439 * is that the flush _must_ happen before the next request, no matter
2442 if (i915.enable_execlists) {
2443 ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
2447 ret = intel_ring_flush_all_caches(ring);
2452 /* Record the position of the start of the request so that
2453 * should we detect the updated seqno part-way through the
2454 * GPU processing the request, we never over-estimate the
2455 * position of the head.
2457 request->postfix = intel_ring_get_tail(ringbuf);
2459 if (i915.enable_execlists) {
2460 ret = ring->emit_request(ringbuf, request);
2464 ret = ring->add_request(ring);
2468 request->tail = intel_ring_get_tail(ringbuf);
2471 request->head = request_start;
2473 /* Whilst this request exists, batch_obj will be on the
2474 * active_list, and so will hold the active reference. Only when this
2475 * request is retired will the batch_obj be moved onto the
2476 * inactive_list and lose its active reference. Hence we do not need
2477 * to explicitly hold another reference here.
2479 request->batch_obj = obj;
2481 if (!i915.enable_execlists) {
2482 /* Hold a reference to the current context so that we can inspect
2483 * it later in case a hangcheck error event fires.
2485 request->ctx = ring->last_context;
2487 i915_gem_context_reference(request->ctx);
2490 request->emitted_jiffies = jiffies;
2491 list_add_tail(&request->list, &ring->request_list);
2492 request->file_priv = NULL;
2495 struct drm_i915_file_private *file_priv = file->driver_priv;
2497 spin_lock(&file_priv->mm.lock);
2498 request->file_priv = file_priv;
2499 list_add_tail(&request->client_list,
2500 &file_priv->mm.request_list);
2501 spin_unlock(&file_priv->mm.lock);
2503 request->pid = curproc->p_pid;
2506 trace_i915_gem_request_add(request);
2507 ring->outstanding_lazy_request = NULL;
2509 i915_queue_hangcheck(ring->dev);
2511 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2512 queue_delayed_work(dev_priv->wq,
2513 &dev_priv->mm.retire_work,
2514 round_jiffies_up_relative(HZ));
2515 intel_mark_busy(dev_priv->dev);
2521 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2523 struct drm_i915_file_private *file_priv = request->file_priv;
2528 spin_lock(&file_priv->mm.lock);
2529 list_del(&request->client_list);
2530 request->file_priv = NULL;
2531 spin_unlock(&file_priv->mm.lock);
2534 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2535 const struct intel_context *ctx)
2537 unsigned long elapsed;
2539 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2541 if (ctx->hang_stats.banned)
2544 if (ctx->hang_stats.ban_period_seconds &&
2545 elapsed <= ctx->hang_stats.ban_period_seconds) {
2546 if (!i915_gem_context_is_default(ctx)) {
2547 DRM_DEBUG("context hanging too fast, banning!\n");
2549 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2550 if (i915_stop_ring_allow_warn(dev_priv))
2551 DRM_ERROR("gpu hanging too fast, banning!\n");
2559 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2560 struct intel_context *ctx,
2563 struct i915_ctx_hang_stats *hs;
2568 hs = &ctx->hang_stats;
2571 hs->banned = i915_context_is_banned(dev_priv, ctx);
2573 hs->guilty_ts = get_seconds();
2575 hs->batch_pending++;
2579 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2581 list_del(&request->list);
2582 i915_gem_request_remove_from_client(request);
2585 put_pid(request->pid);
2588 i915_gem_request_unreference(request);
2591 void i915_gem_request_free(struct kref *req_ref)
2593 struct drm_i915_gem_request *req = container_of(req_ref,
2595 struct intel_context *ctx = req->ctx;
2598 if (i915.enable_execlists) {
2599 struct intel_engine_cs *ring = req->ring;
2601 if (ctx != ring->default_context)
2602 intel_lr_context_unpin(ring, ctx);
2605 i915_gem_context_unreference(ctx);
2611 struct drm_i915_gem_request *
2612 i915_gem_find_active_request(struct intel_engine_cs *ring)
2614 struct drm_i915_gem_request *request;
2616 list_for_each_entry(request, &ring->request_list, list) {
2617 if (i915_gem_request_completed(request, false))
2626 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2627 struct intel_engine_cs *ring)
2629 struct drm_i915_gem_request *request;
2632 request = i915_gem_find_active_request(ring);
2634 if (request == NULL)
2637 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2639 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2641 list_for_each_entry_continue(request, &ring->request_list, list)
2642 i915_set_reset_status(dev_priv, request->ctx, false);
2645 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2646 struct intel_engine_cs *ring)
2648 while (!list_empty(&ring->active_list)) {
2649 struct drm_i915_gem_object *obj;
2651 obj = list_first_entry(&ring->active_list,
2652 struct drm_i915_gem_object,
2655 i915_gem_object_move_to_inactive(obj);
2659 * Clear the execlists queue up before freeing the requests, as those
2660 * are the ones that keep the context and ringbuffer backing objects
2663 while (!list_empty(&ring->execlist_queue)) {
2664 struct drm_i915_gem_request *submit_req;
2666 submit_req = list_first_entry(&ring->execlist_queue,
2667 struct drm_i915_gem_request,
2669 list_del(&submit_req->execlist_link);
2670 intel_runtime_pm_put(dev_priv);
2672 if (submit_req->ctx != ring->default_context)
2673 intel_lr_context_unpin(ring, submit_req->ctx);
2675 i915_gem_request_unreference(submit_req);
2679 * We must free the requests after all the corresponding objects have
2680 * been moved off active lists, which is the same order as the normal
2681 * retire_requests function uses. This is important if objects hold
2682 * implicit references on things like e.g. ppgtt address spaces through
2685 while (!list_empty(&ring->request_list)) {
2686 struct drm_i915_gem_request *request;
2688 request = list_first_entry(&ring->request_list,
2689 struct drm_i915_gem_request,
2692 i915_gem_free_request(request);
2695 /* This may not have been flushed before the reset, so clean it now */
2696 i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
2699 void i915_gem_restore_fences(struct drm_device *dev)
2701 struct drm_i915_private *dev_priv = dev->dev_private;
2704 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2705 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2708 * Commit delayed tiling changes if we have an object still
2709 * attached to the fence, otherwise just clear the fence.
2712 i915_gem_object_update_fence(reg->obj, reg,
2713 reg->obj->tiling_mode);
2715 i915_gem_write_fence(dev, i, NULL);
2720 void i915_gem_reset(struct drm_device *dev)
2722 struct drm_i915_private *dev_priv = dev->dev_private;
2723 struct intel_engine_cs *ring;
2727 * Before we free the objects from the requests, we need to inspect
2728 * them for finding the guilty party. As the requests only borrow
2729 * their reference to the objects, the inspection must be done first.
2731 for_each_ring(ring, dev_priv, i)
2732 i915_gem_reset_ring_status(dev_priv, ring);
2734 for_each_ring(ring, dev_priv, i)
2735 i915_gem_reset_ring_cleanup(dev_priv, ring);
2737 i915_gem_context_reset(dev);
2739 i915_gem_restore_fences(dev);
2743 * This function clears the request list as sequence numbers are passed.
2746 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2748 if (list_empty(&ring->request_list))
2751 WARN_ON(i915_verify_lists(ring->dev));
2753 /* Retire requests first as we use it above for the early return.
2754 * If we retire requests last, we may use a later seqno and so clear
2755 * the requests lists without clearing the active list, leading to
2758 while (!list_empty(&ring->request_list)) {
2759 struct drm_i915_gem_request *request;
2761 request = list_first_entry(&ring->request_list,
2762 struct drm_i915_gem_request,
2765 if (!i915_gem_request_completed(request, true))
2768 trace_i915_gem_request_retire(request);
2770 /* We know the GPU must have read the request to have
2771 * sent us the seqno + interrupt, so use the position
2772 * of the tail of the request to update the last known position
2775 request->ringbuf->last_retired_head = request->postfix;
2777 i915_gem_free_request(request);
2780 /* Move any buffers on the active list that are no longer referenced
2781 * by the ringbuffer to the flushing/inactive lists as appropriate,
2782 * before we free the context associated with the requests.
2784 while (!list_empty(&ring->active_list)) {
2785 struct drm_i915_gem_object *obj;
2787 obj = list_first_entry(&ring->active_list,
2788 struct drm_i915_gem_object,
2791 if (!i915_gem_request_completed(obj->last_read_req, true))
2794 i915_gem_object_move_to_inactive(obj);
2797 if (unlikely(ring->trace_irq_req &&
2798 i915_gem_request_completed(ring->trace_irq_req, true))) {
2799 ring->irq_put(ring);
2800 i915_gem_request_assign(&ring->trace_irq_req, NULL);
2803 WARN_ON(i915_verify_lists(ring->dev));
2807 i915_gem_retire_requests(struct drm_device *dev)
2809 struct drm_i915_private *dev_priv = dev->dev_private;
2810 struct intel_engine_cs *ring;
2814 for_each_ring(ring, dev_priv, i) {
2815 i915_gem_retire_requests_ring(ring);
2816 idle &= list_empty(&ring->request_list);
2817 if (i915.enable_execlists) {
2819 lockmgr(&ring->execlist_lock, LK_EXCLUSIVE);
2820 idle &= list_empty(&ring->execlist_queue);
2821 lockmgr(&ring->execlist_lock, LK_RELEASE);
2823 intel_execlists_retire_requests(ring);
2828 mod_delayed_work(dev_priv->wq,
2829 &dev_priv->mm.idle_work,
2830 msecs_to_jiffies(100));
2836 i915_gem_retire_work_handler(struct work_struct *work)
2838 struct drm_i915_private *dev_priv =
2839 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2840 struct drm_device *dev = dev_priv->dev;
2843 /* Come back later if the device is busy... */
2845 if (mutex_trylock(&dev->struct_mutex)) {
2846 idle = i915_gem_retire_requests(dev);
2847 mutex_unlock(&dev->struct_mutex);
2850 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2851 round_jiffies_up_relative(HZ));
2855 i915_gem_idle_work_handler(struct work_struct *work)
2857 struct drm_i915_private *dev_priv =
2858 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2860 intel_mark_idle(dev_priv->dev);
2864 * Ensures that an object will eventually get non-busy by flushing any required
2865 * write domains, emitting any outstanding lazy request and retiring any
2866 * completed requests.
2869 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2871 struct intel_engine_cs *ring;
2875 ring = i915_gem_request_get_ring(obj->last_read_req);
2877 ret = i915_gem_check_olr(obj->last_read_req);
2881 i915_gem_retire_requests_ring(ring);
2888 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2889 * @DRM_IOCTL_ARGS: standard ioctl arguments
2891 * Returns 0 if successful, else an error is returned with the remaining time in
2892 * the timeout parameter.
2893 * -ETIME: object is still busy after timeout
2894 * -ERESTARTSYS: signal interrupted the wait
2895 * -ENOENT: object doesn't exist
2896 * Also possible, but rare:
2897 * -EAGAIN: GPU wedged
2899 * -ENODEV: Internal IRQ fail
2900 * -E?: The add request failed
2902 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2903 * non-zero timeout parameter the wait ioctl will wait for the given number of
2904 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2905 * without holding struct_mutex the object may become re-busied before this
2906 * function completes. A similar but shorter race condition exists in the busy
2910 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2912 struct drm_i915_private *dev_priv = dev->dev_private;
2913 struct drm_i915_gem_wait *args = data;
2914 struct drm_i915_gem_object *obj;
2915 struct drm_i915_gem_request *req;
2916 unsigned reset_counter;
2919 if (args->flags != 0)
2922 ret = i915_mutex_lock_interruptible(dev);
2926 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2927 if (&obj->base == NULL) {
2928 mutex_unlock(&dev->struct_mutex);
2932 /* Need to make sure the object gets inactive eventually. */
2933 ret = i915_gem_object_flush_active(obj);
2937 if (!obj->active || !obj->last_read_req)
2940 req = obj->last_read_req;
2942 /* Do this after OLR check to make sure we make forward progress polling
2943 * on this IOCTL with a timeout == 0 (like busy ioctl)
2945 if (args->timeout_ns == 0) {
2950 drm_gem_object_unreference(&obj->base);
2951 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2952 i915_gem_request_reference(req);
2953 mutex_unlock(&dev->struct_mutex);
2955 ret = __i915_wait_request(req, reset_counter, true,
2956 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
2958 mutex_lock(&dev->struct_mutex);
2959 i915_gem_request_unreference(req);
2960 mutex_unlock(&dev->struct_mutex);
2964 drm_gem_object_unreference(&obj->base);
2965 mutex_unlock(&dev->struct_mutex);
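/*
 * Illustrative userspace sketch (assumes libdrm and a valid GEM handle;
 * not part of this file):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = 0,	// 0 == busy-poll, like the busy ioctl
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * err == 0 means the object is idle; a busy object fails with errno set
 * (typically ETIME). With a positive timeout the remaining time is written
 * back into timeout_ns.
 */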
2970 * i915_gem_object_sync - sync an object to a ring.
2972 * @obj: object which may be in use on another ring.
2973 * @to: ring we wish to use the object on. May be NULL.
2975 * This code is meant to abstract object synchronization with the GPU.
2976 * Calling with NULL implies synchronizing the object with the CPU
2977 * rather than a particular GPU ring.
2979 * Returns 0 if successful, else propagates up the lower layer error.
2982 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2983 struct intel_engine_cs *to)
2985 struct intel_engine_cs *from;
2989 from = i915_gem_request_get_ring(obj->last_read_req);
2991 if (from == NULL || to == from)
2994 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2995 return i915_gem_object_wait_rendering(obj, false);
2997 idx = intel_ring_sync_index(from, to);
2999 seqno = i915_gem_request_get_seqno(obj->last_read_req);
3000 /* Optimization: Avoid semaphore sync when we are sure we already
3001 * waited for an object with higher seqno */
3002 if (seqno <= from->semaphore.sync_seqno[idx])
3005 ret = i915_gem_check_olr(obj->last_read_req);
3009 trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
3010 ret = to->semaphore.sync_to(to, from, seqno);
3012 /* We use last_read_req because sync_to()
3013 * might have just caused seqno wrap under
3016 from->semaphore.sync_seqno[idx] =
3017 i915_gem_request_get_seqno(obj->last_read_req);
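/*
 * Usage note (a sketch of typical call sites, not taken from this file):
 * callers generally invoke i915_gem_object_sync(obj, ring) before emitting
 * commands that read obj on 'ring', and i915_gem_object_sync(obj, NULL)
 * before CPU access, which degenerates to a plain wait for rendering.
 */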
3022 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3024 u32 old_write_domain, old_read_domains;
3026 /* Force a pagefault for domain tracking on next user access */
3027 i915_gem_release_mmap(obj);
3029 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3032 /* Wait for any direct GTT access to complete */
3035 old_read_domains = obj->base.read_domains;
3036 old_write_domain = obj->base.write_domain;
3038 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
3039 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
3041 trace_i915_gem_object_change_domain(obj,
3046 int i915_vma_unbind(struct i915_vma *vma)
3048 struct drm_i915_gem_object *obj = vma->obj;
3049 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3052 if (list_empty(&vma->vma_link))
3055 if (!drm_mm_node_allocated(&vma->node)) {
3056 i915_gem_vma_destroy(vma);
3063 BUG_ON(obj->pages == NULL);
3065 ret = i915_gem_object_finish_gpu(obj);
3068 /* Continue on if we fail due to EIO, the GPU is hung so we
3069 * should be safe and we need to cleanup or else we might
3070 * cause memory corruption through use-after-free.
3073 if (i915_is_ggtt(vma->vm) &&
3074 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3075 i915_gem_object_finish_gtt(obj);
3077 /* release the fence reg _after_ flushing */
3078 ret = i915_gem_object_put_fence(obj);
3083 trace_i915_vma_unbind(vma);
3085 vma->unbind_vma(vma);
3087 list_del_init(&vma->mm_list);
3088 if (i915_is_ggtt(vma->vm)) {
3089 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3090 obj->map_and_fenceable = false;
3091 } else if (vma->ggtt_view.pages) {
3092 kfree(vma->ggtt_view.pages);
3094 vma->ggtt_view.pages = NULL;
3097 drm_mm_remove_node(&vma->node);
3098 i915_gem_vma_destroy(vma);
3100 /* Since the unbound list is global, only move to that list if
3101 * no more VMAs exist. */
3102 if (list_empty(&obj->vma_list)) {
3103 /* Throw away the active reference before
3104 * moving to the unbound list. */
3105 i915_gem_object_retire(obj);
3107 i915_gem_gtt_finish_object(obj);
3108 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3111 /* And finally now the object is completely decoupled from this vma,
3112 * we can drop its hold on the backing storage and allow it to be
3113 * reaped by the shrinker.
3115 i915_gem_object_unpin_pages(obj);
3120 int i915_gpu_idle(struct drm_device *dev)
3122 struct drm_i915_private *dev_priv = dev->dev_private;
3123 struct intel_engine_cs *ring;
3126 /* Flush everything onto the inactive list. */
3127 for_each_ring(ring, dev_priv, i) {
3128 if (!i915.enable_execlists) {
3129 ret = i915_switch_context(ring, ring->default_context);
3134 ret = intel_ring_idle(ring);
3142 static void i965_write_fence_reg(struct drm_device *dev, int reg,
3143 struct drm_i915_gem_object *obj)
3145 struct drm_i915_private *dev_priv = dev->dev_private;
3147 int fence_pitch_shift;
3149 if (INTEL_INFO(dev)->gen >= 6) {
3150 fence_reg = FENCE_REG_SANDYBRIDGE_0;
3151 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
3153 fence_reg = FENCE_REG_965_0;
3154 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
3157 fence_reg += reg * 8;
3159 /* To w/a incoherency with non-atomic 64-bit register updates,
3160 * we split the 64-bit update into two 32-bit writes. In order
3161 * for a partial fence not to be evaluated between writes, we
3162 * precede the update with write to turn off the fence register,
3163 * and only enable the fence as the last step.
3165 * For extra levels of paranoia, we make sure each step lands
3166 * before applying the next step.
3168 I915_WRITE(fence_reg, 0);
3169 POSTING_READ(fence_reg);
3172 u32 size = i915_gem_obj_ggtt_size(obj);
3175 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3177 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3178 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3179 if (obj->tiling_mode == I915_TILING_Y)
3180 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3181 val |= I965_FENCE_REG_VALID;
3183 I915_WRITE(fence_reg + 4, val >> 32);
3184 POSTING_READ(fence_reg + 4);
3186 I915_WRITE(fence_reg + 0, val);
3187 POSTING_READ(fence_reg);
3189 I915_WRITE(fence_reg + 4, 0);
3190 POSTING_READ(fence_reg + 4);
3194 static void i915_write_fence_reg(struct drm_device *dev, int reg,
3195 struct drm_i915_gem_object *obj)
3197 struct drm_i915_private *dev_priv = dev->dev_private;
3201 u32 size = i915_gem_obj_ggtt_size(obj);
3205 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3206 (size & -size) != size ||
3207 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3208 "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3209 i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
3211 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3216 /* Note: pitch better be a power of two tile widths */
3217 pitch_val = obj->stride / tile_width;
3218 pitch_val = ffs(pitch_val) - 1;
3220 val = i915_gem_obj_ggtt_offset(obj);
3221 if (obj->tiling_mode == I915_TILING_Y)
3222 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3223 val |= I915_FENCE_SIZE_BITS(size);
3224 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3225 val |= I830_FENCE_REG_VALID;
3230 reg = FENCE_REG_830_0 + reg * 4;
3232 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3234 I915_WRITE(reg, val);
3238 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3239 struct drm_i915_gem_object *obj)
3241 struct drm_i915_private *dev_priv = dev->dev_private;
3245 u32 size = i915_gem_obj_ggtt_size(obj);
3248 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3249 (size & -size) != size ||
3250 (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3251 "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3252 i915_gem_obj_ggtt_offset(obj), size);
3254 pitch_val = obj->stride / 128;
3255 pitch_val = ffs(pitch_val) - 1;
3257 val = i915_gem_obj_ggtt_offset(obj);
3258 if (obj->tiling_mode == I915_TILING_Y)
3259 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3260 val |= I830_FENCE_SIZE_BITS(size);
3261 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3262 val |= I830_FENCE_REG_VALID;
3266 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3267 POSTING_READ(FENCE_REG_830_0 + reg * 4);
3270 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3272 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3275 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3276 struct drm_i915_gem_object *obj)
3278 struct drm_i915_private *dev_priv = dev->dev_private;
3280 /* Ensure that all CPU reads are completed before installing a fence
3281 * and all writes before removing the fence.
3283 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3286 WARN(obj && (!obj->stride || !obj->tiling_mode),
3287 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3288 obj->stride, obj->tiling_mode);
3291 i830_write_fence_reg(dev, reg, obj);
3292 else if (IS_GEN3(dev))
3293 i915_write_fence_reg(dev, reg, obj);
3294 else if (INTEL_INFO(dev)->gen >= 4)
3295 i965_write_fence_reg(dev, reg, obj);
3297 /* And similarly be paranoid that no direct access to this region
3298 * is reordered to before the fence is installed.
3300 if (i915_gem_object_needs_mb(obj))
3304 static inline int fence_number(struct drm_i915_private *dev_priv,
3305 struct drm_i915_fence_reg *fence)
3307 return fence - dev_priv->fence_regs;
3310 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3311 struct drm_i915_fence_reg *fence,
3314 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3315 int reg = fence_number(dev_priv, fence);
3317 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3320 obj->fence_reg = reg;
3322 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3324 obj->fence_reg = I915_FENCE_REG_NONE;
3326 list_del_init(&fence->lru_list);
3328 obj->fence_dirty = false;
3332 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3334 if (obj->last_fenced_req) {
3335 int ret = i915_wait_request(obj->last_fenced_req);
3339 i915_gem_request_assign(&obj->last_fenced_req, NULL);
3346 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3348 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3349 struct drm_i915_fence_reg *fence;
3352 ret = i915_gem_object_wait_fence(obj);
3356 if (obj->fence_reg == I915_FENCE_REG_NONE)
3359 fence = &dev_priv->fence_regs[obj->fence_reg];
3361 if (WARN_ON(fence->pin_count))
3364 i915_gem_object_fence_lost(obj);
3365 i915_gem_object_update_fence(obj, fence, false);
3370 static struct drm_i915_fence_reg *
3371 i915_find_fence_reg(struct drm_device *dev)
3373 struct drm_i915_private *dev_priv = dev->dev_private;
3374 struct drm_i915_fence_reg *reg, *avail;
3377 /* First try to find a free reg */
3379 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3380 reg = &dev_priv->fence_regs[i];
3384 if (!reg->pin_count)
3391 /* None available, try to steal one or wait for a user to finish */
3392 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3400 /* Wait for completion of pending flips which consume fences */
3401 if (intel_has_pending_fb_unpin(dev))
3402 return ERR_PTR(-EAGAIN);
3404 return ERR_PTR(-EDEADLK);
3408 * i915_gem_object_get_fence - set up fencing for an object
3409 * @obj: object to map through a fence reg
3411 * When mapping objects through the GTT, userspace wants to be able to write
3412 * to them without having to worry about swizzling if the object is tiled.
3413 * This function walks the fence regs looking for a free one for @obj,
3414 * stealing one if it can't find any.
3416 * It then sets up the reg based on the object's properties: address, pitch
3417 * and tiling format.
3419 * For an untiled surface, this removes any existing fence.
3422 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3424 struct drm_device *dev = obj->base.dev;
3425 struct drm_i915_private *dev_priv = dev->dev_private;
3426 bool enable = obj->tiling_mode != I915_TILING_NONE;
3427 struct drm_i915_fence_reg *reg;
3430 /* Have we updated the tiling parameters upon the object and so
3431 * will need to serialise the write to the associated fence register?
3433 if (obj->fence_dirty) {
3434 ret = i915_gem_object_wait_fence(obj);
3439 /* Just update our place in the LRU if our fence is getting reused. */
3440 if (obj->fence_reg != I915_FENCE_REG_NONE) {
3441 reg = &dev_priv->fence_regs[obj->fence_reg];
3442 if (!obj->fence_dirty) {
3443 list_move_tail(&reg->lru_list,
3444 &dev_priv->mm.fence_list);
3447 } else if (enable) {
3448 if (WARN_ON(!obj->map_and_fenceable))
3451 reg = i915_find_fence_reg(dev);
3453 return PTR_ERR(reg);
3456 struct drm_i915_gem_object *old = reg->obj;
3458 ret = i915_gem_object_wait_fence(old);
3462 i915_gem_object_fence_lost(old);
3467 i915_gem_object_update_fence(obj, reg, enable);
3472 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3473 unsigned long cache_level)
3475 struct drm_mm_node *gtt_space = &vma->node;
3476 struct drm_mm_node *other;
3479 * On some machines we have to be careful when putting differing types
3480 * of snoopable memory together to avoid the prefetcher crossing memory
3481 * domains and dying. During vm initialisation, we decide whether or not
3482 * these constraints apply and set the drm_mm.color_adjust
3485 if (vma->vm->mm.color_adjust == NULL)
3488 if (!drm_mm_node_allocated(gtt_space))
3491 if (list_empty(&gtt_space->node_list))
3494 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3495 if (other->allocated && !other->hole_follows && other->color != cache_level)
3498 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3499 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3506 * Finds free space in the GTT aperture and binds the object there.
3508 static struct i915_vma *
3509 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3510 struct i915_address_space *vm,
3511 const struct i915_ggtt_view *ggtt_view,
3515 struct drm_device *dev = obj->base.dev;
3516 struct drm_i915_private *dev_priv = dev->dev_private;
3517 u32 size, fence_size, fence_alignment, unfenced_alignment;
3518 unsigned long start =
3519 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3521 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3522 struct i915_vma *vma;
3525 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
3526 return ERR_PTR(-EINVAL);
3528 fence_size = i915_gem_get_gtt_size(dev,
3531 fence_alignment = i915_gem_get_gtt_alignment(dev,
3533 obj->tiling_mode, true);
3534 unfenced_alignment =
3535 i915_gem_get_gtt_alignment(dev,
3537 obj->tiling_mode, false);
3540 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3542 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3543 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3544 return ERR_PTR(-EINVAL);
3547 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3549 /* If the object is bigger than the entire aperture, reject it early
3550 * before evicting everything in a vain attempt to find space.
3552 if (obj->base.size > end) {
3553 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
3555 flags & PIN_MAPPABLE ? "mappable" : "total",
3557 return ERR_PTR(-E2BIG);
3560 ret = i915_gem_object_get_pages(obj);
3562 return ERR_PTR(ret);
3564 i915_gem_object_pin_pages(obj);
3566 vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
3567 i915_gem_obj_lookup_or_create_vma(obj, vm);
3573 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3577 DRM_MM_SEARCH_DEFAULT,
3578 DRM_MM_CREATE_DEFAULT);
3580 ret = i915_gem_evict_something(dev, vm, size, alignment,
3589 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3591 goto err_remove_node;
3594 ret = i915_gem_gtt_prepare_object(obj);
3596 goto err_remove_node;
3598 /* allocate before insert / bind */
3599 if (vma->vm->allocate_va_range) {
3600 trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
3601 VM_TO_TRACE_NAME(vma->vm));
3602 ret = vma->vm->allocate_va_range(vma->vm,
3606 goto err_remove_node;
3609 trace_i915_vma_bind(vma, flags);
3610 ret = i915_vma_bind(vma, obj->cache_level,
3611 flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
3613 goto err_finish_gtt;
3615 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3616 list_add_tail(&vma->mm_list, &vm->inactive_list);
3621 i915_gem_gtt_finish_object(obj);
3623 drm_mm_remove_node(&vma->node);
3625 i915_gem_vma_destroy(vma);
3628 i915_gem_object_unpin_pages(obj);
3633 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3636 /* If we don't have a page list set up, then we're not pinned
3637 * to GPU, and we can ignore the cache flush because it'll happen
3638 * again at bind time.
3640 if (obj->pages == NULL)
3644 * Stolen memory is always coherent with the GPU as it is explicitly
3645 * marked as wc by the system, or the system is cache-coherent.
3650 /* If the GPU is snooping the contents of the CPU cache,
3651 * we do not need to manually clear the CPU cache lines. However,
3652 * the caches are only snooped when the render cache is
3653 * flushed/invalidated. As we always have to emit invalidations
3654 * and flushes when moving into and out of the RENDER domain, correct
3655 * snooping behaviour occurs naturally as the result of our domain
3658 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3659 obj->cache_dirty = true;
3663 trace_i915_gem_object_clflush(obj);
3664 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
3665 obj->cache_dirty = false;
3670 /** Flushes the GTT write domain for the object if it's dirty. */
3672 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3674 uint32_t old_write_domain;
3676 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3679 /* No actual flushing is required for the GTT write domain. Writes
3680 * to it immediately go to main memory as far as we know, so there's
3681 * no chipset flush. It also doesn't land in render cache.
3683 * However, we do have to enforce the order so that all writes through
3684 * the GTT land before any writes to the device, such as updates to
3689 old_write_domain = obj->base.write_domain;
3690 obj->base.write_domain = 0;
3692 intel_fb_obj_flush(obj, false);
3696 trace_i915_gem_object_change_domain(obj,
3697 obj->base.read_domains,
3701 /** Flushes the CPU write domain for the object if it's dirty. */
3703 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3705 uint32_t old_write_domain;
3707 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3710 if (i915_gem_clflush_object(obj, obj->pin_display))
3711 i915_gem_chipset_flush(obj->base.dev);
3713 old_write_domain = obj->base.write_domain;
3714 obj->base.write_domain = 0;
3716 trace_i915_gem_object_change_domain(obj,
3717 obj->base.read_domains,
3722 * Moves a single object to the GTT read, and possibly write domain.
3724 * This function returns when the move is complete, including waiting on
3728 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3730 uint32_t old_write_domain, old_read_domains;
3731 struct i915_vma *vma;
3734 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3737 ret = i915_gem_object_wait_rendering(obj, !write);
3741 i915_gem_object_retire(obj);
3743 /* Flush and acquire obj->pages so that we are coherent through
3744 * direct access in memory with previous cached writes through
3745 * shmemfs and that our cache domain tracking remains valid.
3746 * For example, if the obj->filp was moved to swap without us
3747 * being notified and releasing the pages, we would mistakenly
3748 * continue to assume that the obj remained out of the CPU cached
3751 ret = i915_gem_object_get_pages(obj);
3755 i915_gem_object_flush_cpu_write_domain(obj);
3757 /* Serialise direct access to this object with the barriers for
3758 * coherent writes from the GPU, by effectively invalidating the
3759 * GTT domain upon first access.
3761 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3764 old_write_domain = obj->base.write_domain;
3765 old_read_domains = obj->base.read_domains;
3767 /* It should now be out of any other write domains, and we can update
3768 * the domain values for our changes.
3770 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3771 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3773 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3774 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3779 intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
3781 trace_i915_gem_object_change_domain(obj,
3785 /* And bump the LRU for this access */
3786 vma = i915_gem_obj_to_ggtt(obj);
3787 if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
3788 list_move_tail(&vma->mm_list,
3789 &to_i915(obj->base.dev)->gtt.base.inactive_list);
3794 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3795 enum i915_cache_level cache_level)
3797 struct drm_device *dev = obj->base.dev;
3798 struct i915_vma *vma, *next;
3801 if (obj->cache_level == cache_level)
3804 if (i915_gem_obj_is_pinned(obj)) {
3805 DRM_DEBUG("can not change the cache level of pinned objects\n");
3809 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3810 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3811 ret = i915_vma_unbind(vma);
3817 if (i915_gem_obj_bound_any(obj)) {
3818 ret = i915_gem_object_finish_gpu(obj);
3822 i915_gem_object_finish_gtt(obj);
3824 /* Before SandyBridge, you could not use tiling or fence
3825 * registers with snooped memory, so relinquish any fences
3826 * currently pointing to our region in the aperture.
3828 if (INTEL_INFO(dev)->gen < 6) {
3829 ret = i915_gem_object_put_fence(obj);
3834 list_for_each_entry(vma, &obj->vma_list, vma_link)
3835 if (drm_mm_node_allocated(&vma->node)) {
3836 ret = i915_vma_bind(vma, cache_level,
3837 vma->bound & GLOBAL_BIND);
3843 list_for_each_entry(vma, &obj->vma_list, vma_link)
3844 vma->node.color = cache_level;
3845 obj->cache_level = cache_level;
3847 if (obj->cache_dirty &&
3848 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
3849 cpu_write_needs_clflush(obj)) {
3850 if (i915_gem_clflush_object(obj, true))
3851 i915_gem_chipset_flush(obj->base.dev);
3857 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3858 struct drm_file *file)
3860 struct drm_i915_gem_caching *args = data;
3861 struct drm_i915_gem_object *obj;
3864 ret = i915_mutex_lock_interruptible(dev);
3868 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3869 if (&obj->base == NULL) {
3874 switch (obj->cache_level) {
3875 case I915_CACHE_LLC:
3876 case I915_CACHE_L3_LLC:
3877 args->caching = I915_CACHING_CACHED;
3881 args->caching = I915_CACHING_DISPLAY;
3885 args->caching = I915_CACHING_NONE;
3889 drm_gem_object_unreference(&obj->base);
3891 mutex_unlock(&dev->struct_mutex);
3895 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3896 struct drm_file *file)
3898 struct drm_i915_gem_caching *args = data;
3899 struct drm_i915_gem_object *obj;
3900 enum i915_cache_level level;
3903 switch (args->caching) {
3904 case I915_CACHING_NONE:
3905 level = I915_CACHE_NONE;
3907 case I915_CACHING_CACHED:
3908 level = I915_CACHE_LLC;
3910 case I915_CACHING_DISPLAY:
3911 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3917 ret = i915_mutex_lock_interruptible(dev);
3921 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3922 if (&obj->base == NULL) {
3927 ret = i915_gem_object_set_cache_level(obj, level);
3929 drm_gem_object_unreference(&obj->base);
3931 mutex_unlock(&dev->struct_mutex);
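/*
 * Illustrative userspace sketch (assumes libdrm; not part of this file):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 * which reaches i915_gem_object_set_cache_level() above with I915_CACHE_LLC;
 * I915_CACHING_NONE maps to I915_CACHE_NONE and I915_CACHING_DISPLAY to WT
 * or NONE depending on HAS_WT().
 */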
3935 static bool is_pin_display(struct drm_i915_gem_object *obj)
3937 struct i915_vma *vma;
3939 vma = i915_gem_obj_to_ggtt(obj);
3943 /* There are 2 sources that pin objects:
3944 * 1. The display engine (scanouts, sprites, cursors);
3945 * 2. Reservations for execbuffer;
3947 * We can ignore reservations as we hold the struct_mutex and
3948 * are only called outside of the reservation path.
3950 return vma->pin_count;
3954 * Prepare buffer for display plane (scanout, cursors, etc).
3955 * Can be called from an uninterruptible phase (modesetting) and allows
3956 * any flushes to be pipelined (for pageflips).
3959 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3961 struct intel_engine_cs *pipelined,
3962 const struct i915_ggtt_view *view)
3964 u32 old_read_domains, old_write_domain;
3965 bool was_pin_display;
3968 if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
3969 ret = i915_gem_object_sync(obj, pipelined);
3974 /* Mark the pin_display early so that we account for the
3975 * display coherency whilst setting up the cache domains.
3977 was_pin_display = obj->pin_display;
3978 obj->pin_display = true;
3980 /* The display engine is not coherent with the LLC cache on gen6. As
3981 * a result, we make sure that the pinning that is about to occur is
3982 * done with uncached PTEs. This is lowest common denominator for all
3985 * However for gen6+, we could do better by using the GFDT bit instead
3986 * of uncaching, which would allow us to flush all the LLC-cached data
3987 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3989 ret = i915_gem_object_set_cache_level(obj,
3990 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3992 goto err_unpin_display;
3994 /* As the user may map the buffer once pinned in the display plane
3995 * (e.g. libkms for the bootup splash), we have to ensure that we
3996 * always use map_and_fenceable for all scanout buffers.
3998 ret = i915_gem_object_ggtt_pin(obj, view, alignment,
3999 view->type == I915_GGTT_VIEW_NORMAL ?
4002 goto err_unpin_display;
4004 i915_gem_object_flush_cpu_write_domain(obj);
4006 old_write_domain = obj->base.write_domain;
4007 old_read_domains = obj->base.read_domains;
4009 /* It should now be out of any other write domains, and we can update
4010 * the domain values for our changes.
4012 obj->base.write_domain = 0;
4013 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4015 trace_i915_gem_object_change_domain(obj,
4022 WARN_ON(was_pin_display != is_pin_display(obj));
4023 obj->pin_display = was_pin_display;
4028 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
4029 const struct i915_ggtt_view *view)
4031 i915_gem_object_ggtt_unpin_view(obj, view);
4033 obj->pin_display = is_pin_display(obj);
4037 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
4041 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
4044 ret = i915_gem_object_wait_rendering(obj, false);
4048 /* Ensure that we invalidate the GPU's caches and TLBs. */
4049 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
4054 * Moves a single object to the CPU read, and possibly write domain.
4056 * This function returns when the move is complete, including waiting on
4060 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4062 uint32_t old_write_domain, old_read_domains;
4065 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
4068 ret = i915_gem_object_wait_rendering(obj, !write);
4072 i915_gem_object_retire(obj);
4073 i915_gem_object_flush_gtt_write_domain(obj);
4075 old_write_domain = obj->base.write_domain;
4076 old_read_domains = obj->base.read_domains;
4078 /* Flush the CPU cache if it's still invalid. */
4079 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4080 i915_gem_clflush_object(obj, false);
4082 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4085 /* It should now be out of any other write domains, and we can update
4086 * the domain values for our changes.
4088 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
4090 /* If we're writing through the CPU, then the GPU read domains will
4091 * need to be invalidated at next use.
4094 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4095 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4099 intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
4101 trace_i915_gem_object_change_domain(obj,
4108 /* Throttle our rendering by waiting until the ring has completed our requests
4109 * emitted over 20 msec ago.
4111 * Note that if we were to use the current jiffies each time around the loop,
4112 * we wouldn't escape the function with any frames outstanding if the time to
4113 * render a frame was over 20ms.
4115 * This should get us reasonable parallelism between CPU and GPU but also
4116 * relatively low latency when blocking on a particular request to finish.
4119 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4121 struct drm_i915_private *dev_priv = dev->dev_private;
4122 struct drm_i915_file_private *file_priv = file->driver_priv;
4123 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
4124 struct drm_i915_gem_request *request, *target = NULL;
4125 unsigned reset_counter;
4128 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4132 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
4136 spin_lock(&file_priv->mm.lock);
4137 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4138 if (time_after_eq(request->emitted_jiffies, recent_enough))
4143 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
4145 i915_gem_request_reference(target);
4146 spin_unlock(&file_priv->mm.lock);
4151 ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
4153 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4155 mutex_lock(&dev->struct_mutex);
4156 i915_gem_request_unreference(target);
4157 mutex_unlock(&dev->struct_mutex);
4163 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4165 struct drm_i915_gem_object *obj = vma->obj;
4168 vma->node.start & (alignment - 1))
4171 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4174 if (flags & PIN_OFFSET_BIAS &&
4175 vma->node.start < (flags & PIN_OFFSET_MASK))
4182 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4183 struct i915_address_space *vm,
4184 const struct i915_ggtt_view *ggtt_view,
4188 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4189 struct i915_vma *vma;
4193 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4196 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4199 if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4202 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
4205 vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
4206 i915_gem_obj_to_vma(obj, vm);
4209 return PTR_ERR(vma);
4212 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4215 if (i915_vma_misplaced(vma, alignment, flags)) {
4216 unsigned long offset;
4217 offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
4218 i915_gem_obj_offset(obj, vm);
4219 WARN(vma->pin_count,
4220 "bo is already pinned in %s with incorrect alignment:"
4221 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
4222 " obj->map_and_fenceable=%d\n",
4223 ggtt_view ? "ggtt" : "ppgtt",
4226 !!(flags & PIN_MAPPABLE),
4227 obj->map_and_fenceable);
4228 ret = i915_vma_unbind(vma);
4236 bound = vma ? vma->bound : 0;
4237 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4238 /* In true PPGTT, bind has possibly changed PDEs, which
4239 * means we must do a context switch before the GPU can
4240 * accurately read some of the VMAs.
4242 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
4245 return PTR_ERR(vma);
4248 if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
4249 ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
4254 if ((bound ^ vma->bound) & GLOBAL_BIND) {
4255 bool mappable, fenceable;
4256 u32 fence_size, fence_alignment;
4258 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4261 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4266 fenceable = (vma->node.size == fence_size &&
4267 (vma->node.start & (fence_alignment - 1)) == 0);
4269 mappable = (vma->node.start + fence_size <=
4270 dev_priv->gtt.mappable_end);
4272 obj->map_and_fenceable = mappable && fenceable;
4275 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4278 if (flags & PIN_MAPPABLE)
4279 obj->pin_mappable |= true;
4285 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4286 struct i915_address_space *vm,
4290 return i915_gem_object_do_pin(obj, vm,
4291 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
4296 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4297 const struct i915_ggtt_view *view,
4301 if (WARN_ONCE(!view, "no view specified"))
4304 return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
4305 alignment, flags | PIN_GLOBAL);
4309 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
4310 const struct i915_ggtt_view *view)
4312 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
4315 WARN_ON(vma->pin_count == 0);
4316 WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
4318 if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
4319 obj->pin_mappable = false;
4323 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
4325 if (obj->fence_reg != I915_FENCE_REG_NONE) {
4326 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4327 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
4329 WARN_ON(!ggtt_vma ||
4330 dev_priv->fence_regs[obj->fence_reg].pin_count >
4331 ggtt_vma->pin_count);
4332 dev_priv->fence_regs[obj->fence_reg].pin_count++;
4339 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
4341 if (obj->fence_reg != I915_FENCE_REG_NONE) {
4342 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4343 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
4344 dev_priv->fence_regs[obj->fence_reg].pin_count--;
4349 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4350 struct drm_file *file)
4352 struct drm_i915_gem_busy *args = data;
4353 struct drm_i915_gem_object *obj;
4356 ret = i915_mutex_lock_interruptible(dev);
4360 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4361 if (&obj->base == NULL) {
4366 /* Count all active objects as busy, even if they are currently not used
4367 * by the gpu. Users of this interface expect objects to eventually
4368 * become non-busy without any further actions, therefore emit any
4369 * necessary flushes here.
4371 ret = i915_gem_object_flush_active(obj);
4373 args->busy = obj->active;
4374 if (obj->last_read_req) {
4375 struct intel_engine_cs *ring;
4376 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4377 ring = i915_gem_request_get_ring(obj->last_read_req);
4378 args->busy |= intel_ring_flag(ring) << 16;
4381 drm_gem_object_unreference(&obj->base);
4383 mutex_unlock(&dev->struct_mutex);
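/*
 * Illustrative userspace sketch of decoding the busy ioctl result
 * (assumes libdrm; not part of this file):
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		unsigned active = busy.busy & 0xffff;	// low bits: still active
 *		unsigned ring   = busy.busy >> 16;	// ring flag, per above
 *	}
 */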
4388 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4389 struct drm_file *file_priv)
4391 return i915_gem_ring_throttle(dev, file_priv);
4395 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4396 struct drm_file *file_priv)
4398 struct drm_i915_private *dev_priv = dev->dev_private;
4399 struct drm_i915_gem_madvise *args = data;
4400 struct drm_i915_gem_object *obj;
4403 switch (args->madv) {
4404 case I915_MADV_DONTNEED:
4405 case I915_MADV_WILLNEED:
4411 ret = i915_mutex_lock_interruptible(dev);
4415 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4416 if (&obj->base == NULL) {
4421 if (i915_gem_obj_is_pinned(obj)) {
4427 obj->tiling_mode != I915_TILING_NONE &&
4428 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4429 if (obj->madv == I915_MADV_WILLNEED)
4430 i915_gem_object_unpin_pages(obj);
4431 if (args->madv == I915_MADV_WILLNEED)
4432 i915_gem_object_pin_pages(obj);
4435 if (obj->madv != __I915_MADV_PURGED)
4436 obj->madv = args->madv;
4438 /* if the object is no longer attached, discard its backing storage */
4439 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4440 i915_gem_object_truncate(obj);
4442 args->retained = obj->madv != __I915_MADV_PURGED;
4445 drm_gem_object_unreference(&obj->base);
4447 mutex_unlock(&dev->struct_mutex);
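/*
 * Illustrative userspace sketch (assumes libdrm; not part of this file):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,	// backing store may be reaped
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	// madv.retained == 0 means the pages were already purged
 */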
4451 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4452 const struct drm_i915_gem_object_ops *ops)
4454 INIT_LIST_HEAD(&obj->global_list);
4455 INIT_LIST_HEAD(&obj->ring_list);
4456 INIT_LIST_HEAD(&obj->obj_exec_link);
4457 INIT_LIST_HEAD(&obj->vma_list);
4458 INIT_LIST_HEAD(&obj->batch_pool_list);
4462 obj->fence_reg = I915_FENCE_REG_NONE;
4463 obj->madv = I915_MADV_WILLNEED;
4465 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4468 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4469 .get_pages = i915_gem_object_get_pages_gtt,
4470 .put_pages = i915_gem_object_put_pages_gtt,
4473 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4476 struct drm_i915_gem_object *obj;
4478 struct address_space *mapping;
4482 obj = i915_gem_object_alloc(dev);
4486 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4487 i915_gem_object_free(obj);
4492 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4493 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4494 /* 965gm cannot relocate objects above 4GiB. */
4495 mask &= ~__GFP_HIGHMEM;
4496 mask |= __GFP_DMA32;
4499 mapping = file_inode(obj->base.filp)->i_mapping;
4500 mapping_set_gfp_mask(mapping, mask);
4503 i915_gem_object_init(obj, &i915_gem_object_ops);
4505 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4506 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4509 /* On some devices, we can have the GPU use the LLC (the CPU
4510 * cache) for about a 10% performance improvement
4511 * compared to uncached. Graphics requests other than
4512 * display scanout are coherent with the CPU in
4513 * accessing this cache. This means in this mode we
4514 * don't need to clflush on the CPU side, and on the
4515 * GPU side we only need to flush internal caches to
4516 * get data visible to the CPU.
4518 * However, we maintain the display planes as UC, and so
4519 * need to rebind when first used as such.
4521 obj->cache_level = I915_CACHE_LLC;
4523 obj->cache_level = I915_CACHE_NONE;
4525 trace_i915_gem_object_create(obj);
4530 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4532 /* If we are the last user of the backing storage (be it shmemfs
4533 * pages or stolen etc), we know that the pages are going to be
4534 * immediately released. In this case, we can then skip copying
4535 * back the contents from the GPU.
4538 if (obj->madv != I915_MADV_WILLNEED)
4541 if (obj->base.vm_obj == NULL)
4544 /* At first glance, this looks racy, but then again so would be
4545 * userspace racing mmap against close. However, the first external
4546 * reference to the filp can only be obtained through the
4547 * i915_gem_mmap_ioctl() which safeguards us against the user
4548 * acquiring such a reference whilst we are in the middle of
4549 * freeing the object.
4552 return atomic_long_read(&obj->base.filp->f_count) == 1;
4558 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4560 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4561 struct drm_device *dev = obj->base.dev;
4562 struct drm_i915_private *dev_priv = dev->dev_private;
4563 struct i915_vma *vma, *next;
4565 intel_runtime_pm_get(dev_priv);
4567 trace_i915_gem_object_destroy(obj);
4569 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4573 ret = i915_vma_unbind(vma);
4574 if (WARN_ON(ret == -ERESTARTSYS)) {
4575 bool was_interruptible;
4577 was_interruptible = dev_priv->mm.interruptible;
4578 dev_priv->mm.interruptible = false;
4580 WARN_ON(i915_vma_unbind(vma));
4582 dev_priv->mm.interruptible = was_interruptible;
4586 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4587 * before progressing. */
4589 i915_gem_object_unpin_pages(obj);
4591 WARN_ON(obj->frontbuffer_bits);
4593 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4594 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4595 obj->tiling_mode != I915_TILING_NONE)
4596 i915_gem_object_unpin_pages(obj);
4598 if (WARN_ON(obj->pages_pin_count))
4599 obj->pages_pin_count = 0;
4600 if (discard_backing_storage(obj))
4601 obj->madv = I915_MADV_DONTNEED;
4602 i915_gem_object_put_pages(obj);
4603 i915_gem_object_free_mmap_offset(obj);
4608 if (obj->base.import_attach)
4609 drm_prime_gem_destroy(&obj->base, NULL);
4612 if (obj->ops->release)
4613 obj->ops->release(obj);
4615 drm_gem_object_release(&obj->base);
4616 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4619 i915_gem_object_free(obj);
4621 intel_runtime_pm_put(dev_priv);
4624 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4625 struct i915_address_space *vm)
4627 struct i915_vma *vma;
4628 list_for_each_entry(vma, &obj->vma_list, vma_link) {
4629 if (i915_is_ggtt(vma->vm) &&
4630 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4638 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4639 const struct i915_ggtt_view *view)
4641 struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
4642 struct i915_vma *vma;
4644 if (WARN_ONCE(!view, "no view specified"))
4645 return ERR_PTR(-EINVAL);
4647 list_for_each_entry(vma, &obj->vma_list, vma_link)
4648 if (vma->vm == ggtt &&
4649 i915_ggtt_view_equal(&vma->ggtt_view, view))
4654 void i915_gem_vma_destroy(struct i915_vma *vma)
4656 struct i915_address_space *vm = NULL;
4657 WARN_ON(vma->node.allocated);
4659 /* Keep the vma as a placeholder in the execbuffer reservation lists */
4660 if (!list_empty(&vma->exec_list))
4665 if (!i915_is_ggtt(vm))
4666 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
4668 list_del(&vma->vma_link);
4674 i915_gem_stop_ringbuffers(struct drm_device *dev)
4676 struct drm_i915_private *dev_priv = dev->dev_private;
4677 struct intel_engine_cs *ring;
4680 for_each_ring(ring, dev_priv, i)
4681 dev_priv->gt.stop_ring(ring);
4685 i915_gem_suspend(struct drm_device *dev)
4687 struct drm_i915_private *dev_priv = dev->dev_private;
4690 mutex_lock(&dev->struct_mutex);
4691 ret = i915_gpu_idle(dev);
4695 i915_gem_retire_requests(dev);
4697 i915_gem_stop_ringbuffers(dev);
4698 mutex_unlock(&dev->struct_mutex);
4700 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4701 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4703 flush_delayed_work(&dev_priv->mm.idle_work);
4706 /* Assert that we successfully flushed all the work and
4707 * reset the GPU back to its idle, low power state.
4709 WARN_ON(dev_priv->mm.busy);
4714 mutex_unlock(&dev->struct_mutex);
4718 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
4720 struct drm_device *dev = ring->dev;
4721 struct drm_i915_private *dev_priv = dev->dev_private;
4722 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4723 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4726 if (!HAS_L3_DPF(dev) || !remap_info)
4729 ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4734 * Note: We do not worry about the concurrent register cacheline hang
4735 * here because no other code should access these registers other than
4736 * at initialization time.
4738 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4739 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4740 intel_ring_emit(ring, reg_base + i);
4741 intel_ring_emit(ring, remap_info[i/4]);
4744 intel_ring_advance(ring);
4749 void i915_gem_init_swizzling(struct drm_device *dev)
4751 struct drm_i915_private *dev_priv = dev->dev_private;
4753 if (INTEL_INFO(dev)->gen < 5 ||
4754 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4757 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4758 DISP_TILE_SURFACE_SWIZZLING);
4763 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4765 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4766 else if (IS_GEN7(dev))
4767 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4768 else if (IS_GEN8(dev))
4769 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4775 intel_enable_blt(struct drm_device *dev)
4780 /* The blitter was dysfunctional on early prototypes */
4781 if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4782 DRM_INFO("BLT not supported on this pre-production hardware;"
4783 " graphics performance will be degraded.\n");
4790 static void init_unused_ring(struct drm_device *dev, u32 base)
4792 struct drm_i915_private *dev_priv = dev->dev_private;
4794 I915_WRITE(RING_CTL(base), 0);
4795 I915_WRITE(RING_HEAD(base), 0);
4796 I915_WRITE(RING_TAIL(base), 0);
4797 I915_WRITE(RING_START(base), 0);
4800 static void init_unused_rings(struct drm_device *dev)
4803 init_unused_ring(dev, PRB1_BASE);
4804 init_unused_ring(dev, SRB0_BASE);
4805 init_unused_ring(dev, SRB1_BASE);
4806 init_unused_ring(dev, SRB2_BASE);
4807 init_unused_ring(dev, SRB3_BASE);
4808 } else if (IS_GEN2(dev)) {
4809 init_unused_ring(dev, SRB0_BASE);
4810 init_unused_ring(dev, SRB1_BASE);
4811 } else if (IS_GEN3(dev)) {
4812 init_unused_ring(dev, PRB1_BASE);
4813 init_unused_ring(dev, PRB2_BASE);
int i915_gem_init_rings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			goto cleanup_render_ring;
	}

	if (intel_enable_blt(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			goto cleanup_bsd_ring;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			goto cleanup_blt_ring;
	}

	if (HAS_BSD2(dev)) {
		ret = intel_init_bsd2_ring_buffer(dev);
		if (ret)
			goto cleanup_vebox_ring;
	}

	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
	if (ret)
		goto cleanup_bsd2_ring;

	return 0;

cleanup_bsd2_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);

	return ret;
}
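/*
 * Bring the hardware side of GEM up: program swizzling, quiesce the unused
 * rings, run each ring's init_hw hook, restore the L3 remapping tables and
 * enable PPGTT and contexts, all while holding a forcewake reference.
 */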
int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (dev_priv->ellc_size)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev)) {
		if (IS_IVYBRIDGE(dev)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev);

	for_each_ring(ring, dev_priv, i) {
		ret = ring->init_hw(ring);
		if (ret)
			goto out;
	}

	for (i = 0; i < NUM_L3_SLICES(dev); i++)
		i915_gem_l3_remap(&dev_priv->ring[RCS], i);

	ret = i915_ppgtt_init_hw(dev);
	if (ret && ret != -EIO) {
		DRM_ERROR("PPGTT enable failed %d\n", ret);
		i915_gem_cleanup_ringbuffer(dev);
	}

	ret = i915_gem_context_enable(dev_priv);
	if (ret && ret != -EIO) {
		DRM_ERROR("Context enable failed %d\n", ret);
		i915_gem_cleanup_ringbuffer(dev);

		goto out;
	}

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}
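/*
 * Top-level GEM initialisation: pick legacy ringbuffer or execlists
 * submission, set up the global GTT, contexts and rings, and treat an
 * -EIO from i915_gem_init_hw() as a wedged GPU rather than a fatal error.
 */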
int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
			i915.enable_execlists);

	mutex_lock(&dev->struct_mutex);

	if (IS_VALLEYVIEW(dev)) {
		/* VLVA0 (potential hack), BIOS isn't actually waking us */
		I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
			      VLV_GTLC_ALLOWWAKEACK), 10))
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
	}

	if (!i915.enable_execlists) {
		dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
		dev_priv->gt.init_rings = i915_gem_init_rings;
		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
	} else {
		dev_priv->gt.do_execbuf = intel_execlists_submission;
		dev_priv->gt.init_rings = intel_logical_rings_init;
		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
		dev_priv->gt.stop_ring = intel_logical_ring_stop;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	ret = i915_gem_init_userptr(dev);
	if (ret)
		goto out_unlock;

	i915_gem_init_global_gtt(dev);

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = dev_priv->gt.init_rings(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow ring initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failures, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
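/*
 * Tear down every engine through the gt.cleanup_ring vfunc selected in
 * i915_gem_init().
 */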
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		dev_priv->gt.cleanup_ring(ring);
}
static void
init_ring_lists(struct intel_engine_cs *ring)
{
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
}
void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm)
{
	if (!i915_is_ggtt(vm))
		drm_mm_init(&vm->mm, vm->start, vm->total);
	vm->dev = dev_priv->dev;
	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->global_link);
	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
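/*
 * One-time GEM setup at driver load: initialise the VM and object lists,
 * per-ring bookkeeping, fence registers, deferred retire/idle work, the
 * shrinker and the batch pool.
 */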
void
i915_gem_load(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	INIT_LIST_HEAD(&dev_priv->vm_list);
	i915_init_vm(dev_priv, &dev_priv->gtt.base);

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	for (i = 0; i < I915_NUM_RINGS; i++)
		init_ring_lists(&dev_priv->ring[i]);
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 ||
		 IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	i915_gem_shrinker_init(dev_priv);

	i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);

	lockinit(&dev_priv->fb_tracking.lock, "drmftl", 0, LK_CANRECURSE);
}
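/*
 * Called when a client closes its file descriptor: drop the idle work and
 * detach any requests still pointing at this file's private data.
 */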
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	cancel_delayed_work_sync(&file_priv->mm.idle_work);

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}
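/*
 * VM pager constructor/destructor hooks used for GEM mmaps on the BSD
 * side: the constructor only assigns a placeholder colour, while the
 * destructor releases the mmap offset and drops the object reference
 * under struct_mutex.
 */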
static int
i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	*color = 0; /* XXXKIB */
	return (0);
}
static void
i915_gem_pager_dtor(void *handle)
{
	struct drm_gem_object *obj;
	struct drm_device *dev;

	obj = handle;
	dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_gem_free_mmap_offset(obj);
	i915_gem_release_mmap(to_intel_bo(obj));
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
static void
i915_gem_file_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_file_private *file_priv =
		container_of(work, typeof(*file_priv), mm.idle_work.work);

	atomic_set(&file_priv->rps_wait_boost, false);
}
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;

	spin_init(&file_priv->mm.lock, "i915_priv");
	INIT_LIST_HEAD(&file_priv->mm.request_list);
	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
			  i915_gem_file_idle_work_handler);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}
/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}

	if (new) {
		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}
/* All the new VM stuff */
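/*
 * Return the offset of @o within @vm, ignoring GGTT VMAs whose view is
 * not the normal one; the _view variant below matches on the exact GGTT
 * view instead.
 */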
unsigned long
i915_gem_obj_offset(struct drm_i915_gem_object *o,
		    struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}
unsigned long
i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
			      const struct i915_ggtt_view *view)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view))
			return vma->node.start;

	WARN(1, "global vma for this object not found.\n");
	return -1;
}
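/*
 * The *_bound helpers report whether the object has an allocated drm_mm
 * node in the given address space (or GGTT view), i.e. whether it is
 * currently bound there.
 */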
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
			return true;
	}

	return false;
}
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (vma->vm == ggtt &&
		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
		    drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link)
		if (drm_mm_node_allocated(&vma->node))
			return true;

	return false;
}
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

	BUG_ON(list_empty(&o->vma_list));

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
			return vma->node.size;
	}

	return 0;
}
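/*
 * An object counts as pinned if any of its normal-view VMAs currently
 * holds a non-zero pin_count.
 */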
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->pin_count > 0)