2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
26 * Copyright (c) 2011 The FreeBSD Foundation
27 * All rights reserved.
29 * This software was developed by Konstantin Belousov under sponsorship from
30 * the FreeBSD Foundation.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 #include <sys/resourcevar.h>
56 #include <sys/sfbuf.h>
57 #include <machine/md_var.h>
60 #include <drm/i915_drm.h>
62 #include "i915_trace.h"
63 #include "intel_drv.h"
64 #include <linux/shmem_fs.h>
65 #include <linux/slab.h>
66 #include <linux/pci.h>
68 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
69 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
70 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
72 bool map_and_fenceable,
74 static int i915_gem_phys_pwrite(struct drm_device *dev,
75 struct drm_i915_gem_object *obj,
76 struct drm_i915_gem_pwrite *args,
77 struct drm_file *file);
79 static void i915_gem_write_fence(struct drm_device *dev, int reg,
80 struct drm_i915_gem_object *obj);
81 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
82 struct drm_i915_fence_reg *fence,
85 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
86 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
88 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
91 i915_gem_release_mmap(obj);
93 /* As we do not have an associated fence register, we will force
94 * a tiling change if we ever need to acquire one.
96 obj->fence_dirty = false;
97 obj->fence_reg = I915_FENCE_REG_NONE;
100 static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj);
101 static void i915_gem_lowmem(void *arg);
103 /* some bookkeeping */
104 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
107 dev_priv->mm.object_count++;
108 dev_priv->mm.object_memory += size;
111 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
114 dev_priv->mm.object_count--;
115 dev_priv->mm.object_memory -= size;
119 i915_gem_wait_for_error(struct i915_gpu_error *error)
123 #define EXIT_COND (!i915_reset_in_progress(error) || \
124 i915_terminally_wedged(error))
129 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
130 * userspace. If it takes that long something really bad is going on and
131 * we should simply try to bail out and fail as gracefully as possible.
133 ret = wait_event_interruptible_timeout(error->reset_queue,
137 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
139 } else if (ret < 0) {
147 int i915_mutex_lock_interruptible(struct drm_device *dev)
149 struct drm_i915_private *dev_priv = dev->dev_private;
152 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
156 ret = lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_SLEEPFAIL);
160 WARN_ON(i915_verify_lists(dev));
165 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
171 i915_gem_init_ioctl(struct drm_device *dev, void *data,
172 struct drm_file *file)
174 struct drm_i915_private *dev_priv = dev->dev_private;
175 struct drm_i915_gem_init *args = data;
177 if (drm_core_check_feature(dev, DRIVER_MODESET))
180 if (args->gtt_start >= args->gtt_end ||
181 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
184 /* GEM with user mode setting was never supported on ilk and later. */
185 if (INTEL_INFO(dev)->gen >= 5)
188 mutex_lock(&dev->struct_mutex);
189 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
191 dev_priv->gtt.mappable_end = args->gtt_end;
192 mutex_unlock(&dev->struct_mutex);
198 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
199 struct drm_file *file)
201 struct drm_i915_private *dev_priv = dev->dev_private;
202 struct drm_i915_gem_get_aperture *args = data;
203 struct drm_i915_gem_object *obj;
207 mutex_lock(&dev->struct_mutex);
208 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
210 pinned += obj->gtt_space->size;
211 mutex_unlock(&dev->struct_mutex);
213 args->aper_size = dev_priv->gtt.total;
214 args->aper_available_size = args->aper_size - pinned;
219 void i915_gem_object_free(struct drm_i915_gem_object *obj)
225 i915_gem_create(struct drm_file *file,
226 struct drm_device *dev,
230 struct drm_i915_gem_object *obj;
234 size = roundup(size, PAGE_SIZE);
238 /* Allocate the new object */
239 obj = i915_gem_alloc_object(dev, size);
243 ret = drm_gem_handle_create(file, &obj->base, &handle);
245 drm_gem_object_release(&obj->base);
246 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
247 i915_gem_object_free(obj);
251 /* drop reference from allocate - handle holds it now */
252 drm_gem_object_unreference(&obj->base);
253 trace_i915_gem_object_create(obj);
260 i915_gem_dumb_create(struct drm_file *file,
261 struct drm_device *dev,
262 struct drm_mode_create_dumb *args)
265 /* have to work out size/pitch and return them */
266 args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
267 args->size = args->pitch * args->height;
268 return i915_gem_create(file, dev,
269 args->size, &args->handle);
272 int i915_gem_dumb_destroy(struct drm_file *file,
273 struct drm_device *dev,
277 return drm_gem_handle_delete(file, handle);
281 * Creates a new mm object and returns a handle to it.
284 i915_gem_create_ioctl(struct drm_device *dev, void *data,
285 struct drm_file *file)
287 struct drm_i915_gem_create *args = data;
289 return i915_gem_create(file, dev,
290 args->size, &args->handle);
294 __copy_to_user_swizzled(char __user *cpu_vaddr,
295 const char *gpu_vaddr, int gpu_offset,
298 int ret, cpu_offset = 0;
301 int cacheline_end = ALIGN(gpu_offset + 1, 64);
302 int this_length = min(cacheline_end - gpu_offset, length);
303 int swizzled_gpu_offset = gpu_offset ^ 64;
305 ret = __copy_to_user(cpu_vaddr + cpu_offset,
306 gpu_vaddr + swizzled_gpu_offset,
311 cpu_offset += this_length;
312 gpu_offset += this_length;
313 length -= this_length;
320 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
321 const char __user *cpu_vaddr,
324 int ret, cpu_offset = 0;
327 int cacheline_end = ALIGN(gpu_offset + 1, 64);
328 int this_length = min(cacheline_end - gpu_offset, length);
329 int swizzled_gpu_offset = gpu_offset ^ 64;
331 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
332 cpu_vaddr + cpu_offset,
337 cpu_offset += this_length;
338 gpu_offset += this_length;
339 length -= this_length;
345 /* Per-page copy function for the shmem pread fastpath.
346 * Flushes invalid cachelines before reading the target if
347 * needs_clflush is set. */
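/* This fast path runs with struct_mutex held and uses an atomic kmap, so the
 * user copy must not fault.  On -EFAULT the pread loop drops the mutex,
 * prefaults the destination pages and retries via shmem_pread_slow(). */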
349 shmem_pread_fast(struct vm_page *page, int shmem_page_offset, int page_length,
350 char __user *user_data,
351 bool page_do_bit17_swizzling, bool needs_clflush)
356 if (unlikely(page_do_bit17_swizzling))
359 vaddr = kmap_atomic(page);
361 drm_clflush_virt_range(vaddr + shmem_page_offset,
363 ret = __copy_to_user_inatomic(user_data,
364 vaddr + shmem_page_offset,
366 kunmap_atomic(vaddr);
368 return ret ? -EFAULT : 0;
372 shmem_clflush_swizzled_range(char *addr, unsigned long length,
375 if (unlikely(swizzled)) {
376 unsigned long start = (unsigned long) addr;
377 unsigned long end = (unsigned long) addr + length;
379 /* For swizzling simply ensure that we always flush both
380 * channels. Lame, but simple and it works. Swizzled
381 * pwrite/pread is far from a hotpath - current userspace
382 * doesn't use it at all. */
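/* Rounding to 128 bytes works because bit-17 swizzling only swaps the two
 * 64-byte halves of a 128-byte aligned pair (the "offset ^ 64" used by the
 * swizzled copy helpers), so flushing the whole aligned pair covers both the
 * swizzled and unswizzled location of every byte in the range. */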
383 start = round_down(start, 128);
384 end = round_up(end, 128);
386 drm_clflush_virt_range((void *)start, end - start);
388 drm_clflush_virt_range(addr, length);
393 /* Only difference to the fast-path function is that this can handle bit17
394 * and uses non-atomic copy and kmap functions. */
396 shmem_pread_slow(struct vm_page *page, int shmem_page_offset, int page_length,
397 char __user *user_data,
398 bool page_do_bit17_swizzling, bool needs_clflush)
405 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
407 page_do_bit17_swizzling);
409 if (page_do_bit17_swizzling)
410 ret = __copy_to_user_swizzled(user_data,
411 vaddr, shmem_page_offset,
414 ret = __copy_to_user(user_data,
415 vaddr + shmem_page_offset,
419 return ret ? -EFAULT : 0;
422 static inline void vm_page_reference(vm_page_t m)
424 vm_page_flag_set(m, PG_REFERENCED);
428 i915_gem_shmem_pread(struct drm_device *dev,
429 struct drm_i915_gem_object *obj,
430 struct drm_i915_gem_pread *args,
431 struct drm_file *file)
433 char __user *user_data;
436 int shmem_page_offset, page_length, ret = 0;
437 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
438 int hit_slowpath = 0;
439 int needs_clflush = 0;
442 user_data = (char __user *) (uintptr_t) args->data_ptr;
445 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
447 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
448 /* If we're not in the cpu read domain, set ourself into the gtt
449 * read domain and manually flush cachelines (if required). This
450 * optimizes for the case when the gpu will dirty the data
451 * anyway again before the next pread happens. */
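/* The manual clflush is only required for uncached (I915_CACHE_NONE)
 * objects; LLC-cached objects remain coherent with the CPU and need no
 * flushing here. */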
452 if (obj->cache_level == I915_CACHE_NONE)
454 if (obj->gtt_space) {
455 ret = i915_gem_object_set_to_gtt_domain(obj, false);
461 ret = i915_gem_object_get_pages(obj);
465 i915_gem_object_pin_pages(obj);
467 offset = args->offset;
469 for (i = 0; i < (obj->base.size >> PAGE_SHIFT); i++) {
470 struct vm_page *page;
472 if (i < offset >> PAGE_SHIFT)
478 /* Operation in this page
480 * shmem_page_offset = offset within page in shmem file
481 * page_length = bytes to copy for this page
483 shmem_page_offset = offset_in_page(offset);
484 page_length = remain;
485 if ((shmem_page_offset + page_length) > PAGE_SIZE)
486 page_length = PAGE_SIZE - shmem_page_offset;
490 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
491 (page_to_phys(page) & (1 << 17)) != 0;
493 page = obj->pages[i];
494 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
495 (VM_PAGE_TO_PHYS(page) & (1 << 17)) != 0;
498 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
499 user_data, page_do_bit17_swizzling,
505 mutex_unlock(&dev->struct_mutex);
509 ret = fault_in_multipages_writeable(user_data, remain);
510 /* Userspace is tricking us, but we've already clobbered
511 * its pages with the prefault and promised to write the
512 * data up to the first fault. Hence ignore any errors
513 * and just continue. */
519 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
520 user_data, page_do_bit17_swizzling,
523 mutex_lock(&dev->struct_mutex);
527 mark_page_accessed(page);
533 remain -= page_length;
534 user_data += page_length;
535 offset += page_length;
539 i915_gem_object_unpin_pages(obj);
542 /* Fixup: Kill any reinstated backing storage pages */
543 if (obj->madv == __I915_MADV_PURGED)
544 i915_gem_object_truncate(obj);
551 * Reads data from the object referenced by handle.
553 * On error, the contents of *data are undefined.
556 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
557 struct drm_file *file)
559 struct drm_i915_gem_pread *args = data;
560 struct drm_i915_gem_object *obj;
566 ret = i915_mutex_lock_interruptible(dev);
570 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
571 if (&obj->base == NULL) {
576 /* Bounds check source. */
577 if (args->offset > obj->base.size ||
578 args->size > obj->base.size - args->offset) {
583 ret = i915_gem_shmem_pread(dev, obj, args, file);
585 drm_gem_object_unreference(&obj->base);
587 mutex_unlock(&dev->struct_mutex);
591 /* This is the fast write path which cannot handle
592 * page faults in the source data
596 fast_user_write(struct io_mapping *mapping,
597 loff_t page_base, int page_offset,
598 char __user *user_data,
601 void __iomem *vaddr_atomic;
603 unsigned long unwritten;
605 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
606 /* We can use the cpu mem copy function because this is X86. */
607 vaddr = (char __force*)vaddr_atomic + page_offset;
608 unwritten = __copy_from_user_inatomic_nocache(vaddr,
610 io_mapping_unmap_atomic(vaddr_atomic);
615 * This is the fast pwrite path, where we copy the data directly from the
616 * user into the GTT, uncached.
619 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
620 struct drm_i915_gem_object *obj,
621 struct drm_i915_gem_pwrite *args,
622 struct drm_file *file)
624 drm_i915_private_t *dev_priv = dev->dev_private;
626 loff_t offset, page_base;
627 char __user *user_data;
628 int page_offset, page_length, ret;
630 ret = i915_gem_object_pin(obj, 0, true, true);
634 ret = i915_gem_object_set_to_gtt_domain(obj, true);
638 ret = i915_gem_object_put_fence(obj);
642 user_data = to_user_ptr(args->data_ptr);
645 offset = obj->gtt_offset + args->offset;
648 /* Operation in this page
650 * page_base = page offset within aperture
651 * page_offset = offset within page
652 * page_length = bytes to copy for this page
654 page_base = offset & PAGE_MASK;
655 page_offset = offset_in_page(offset);
656 page_length = remain;
657 if ((page_offset + remain) > PAGE_SIZE)
658 page_length = PAGE_SIZE - page_offset;
660 /* If we get a fault while copying data, then (presumably) our
661 * source page isn't available. Return the error and we'll
662 * retry in the slow path.
664 if (fast_user_write(dev_priv->gtt.mappable, page_base,
665 page_offset, user_data, page_length)) {
670 remain -= page_length;
671 user_data += page_length;
672 offset += page_length;
676 i915_gem_object_unpin(obj);
682 /* Per-page copy function for the shmem pwrite fastpath.
683 * Flushes invalid cachelines before writing to the target if
684 * needs_clflush_before is set and flushes out any written cachelines after
685 * writing if needs_clflush is set. */
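/* Like the pread fast path above, this expects struct_mutex to be held and
 * uses an atomic kmap, so the user copy must not fault; a fault is reported
 * as -EFAULT so that a slower, faultable path can take over. */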
687 shmem_pwrite_fast(struct vm_page *page, int shmem_page_offset, int page_length,
688 char __user *user_data,
689 bool page_do_bit17_swizzling,
690 bool needs_clflush_before,
691 bool needs_clflush_after)
696 if (unlikely(page_do_bit17_swizzling))
699 vaddr = kmap_atomic(page);
700 if (needs_clflush_before)
701 drm_clflush_virt_range(vaddr + shmem_page_offset,
703 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
706 if (needs_clflush_after)
707 drm_clflush_virt_range(vaddr + shmem_page_offset,
709 kunmap_atomic(vaddr);
711 return ret ? -EFAULT : 0;
714 /* Only difference to the fast-path function is that this can handle bit17
715 * and uses non-atomic copy and kmap functions. */
717 shmem_pwrite_slow(struct vm_page *page, int shmem_page_offset, int page_length,
718 char __user *user_data,
719 bool page_do_bit17_swizzling,
720 bool needs_clflush_before,
721 bool needs_clflush_after)
727 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
728 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
730 page_do_bit17_swizzling);
731 if (page_do_bit17_swizzling)
732 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
736 ret = __copy_from_user(vaddr + shmem_page_offset,
739 if (needs_clflush_after)
740 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
742 page_do_bit17_swizzling);
745 return ret ? -EFAULT : 0;
750 i915_gem_shmem_pwrite(struct drm_device *dev,
751 struct drm_i915_gem_object *obj,
752 struct drm_i915_gem_pwrite *args,
753 struct drm_file *file)
760 int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
762 do_bit17_swizzling = 0;
765 vm_obj = obj->base.vm_obj;
768 VM_OBJECT_LOCK(vm_obj);
769 vm_object_pip_add(vm_obj, 1);
770 while (args->size > 0) {
771 obj_pi = OFF_TO_IDX(args->offset);
772 obj_po = args->offset & PAGE_MASK;
774 m = shmem_read_mapping_page(vm_obj, obj_pi);
775 VM_OBJECT_UNLOCK(vm_obj);
777 sf = sf_buf_alloc(m);
778 mkva = sf_buf_kva(sf);
779 length = min(args->size, PAGE_SIZE - obj_po);
781 if (do_bit17_swizzling &&
782 (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
783 cnt = roundup2(obj_po + 1, 64);
784 cnt = min(cnt - obj_po, length);
785 swizzled_po = obj_po ^ 64;
788 swizzled_po = obj_po;
790 ret = -copyin_nofault(
791 (void *)(uintptr_t)args->data_ptr,
792 (char *)mkva + swizzled_po, cnt);
795 args->data_ptr += cnt;
802 VM_OBJECT_LOCK(vm_obj);
804 vm_page_reference(m);
805 vm_page_busy_wait(m, FALSE, "i915gem");
806 vm_page_unwire(m, 1);
812 vm_object_pip_wakeup(vm_obj);
813 VM_OBJECT_UNLOCK(vm_obj);
819 * Writes data to the object referenced by handle.
821 * On error, the contents of the buffer that were to be modified are undefined.
824 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
825 struct drm_file *file)
827 struct drm_i915_gem_pwrite *args = data;
828 struct drm_i915_gem_object *obj;
835 if (!access_ok(VERIFY_READ,
836 to_user_ptr(args->data_ptr),
840 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
846 ret = i915_mutex_lock_interruptible(dev);
850 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
851 if (&obj->base == NULL) {
856 /* Bounds check destination. */
857 if (args->offset > obj->base.size ||
858 args->size > obj->base.size - args->offset) {
863 /* prime objects have no backing filp to GEM pread/pwrite
867 if (!obj->base.filp) {
873 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
876 /* We can only do the GTT pwrite on untiled buffers, as otherwise
877 * it would end up going through the fenced access, and we'll get
878 * different detiling behavior between reading and writing.
879 * pread/pwrite currently are reading and writing from the CPU
880 * perspective, requiring manual detiling by the client.
883 ret = i915_gem_phys_pwrite(dev, obj, args, file);
887 if (obj->cache_level == I915_CACHE_NONE &&
888 obj->tiling_mode == I915_TILING_NONE &&
889 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
890 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
891 /* Note that the gtt paths might fail with non-page-backed user
892 * pointers (e.g. gtt mappings when moving data between
893 * textures). Fall back to the shmem path in that case. */
896 if (ret == -EFAULT || ret == -ENOSPC)
897 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
900 drm_gem_object_unreference(&obj->base);
902 mutex_unlock(&dev->struct_mutex);
907 i915_gem_check_wedge(struct i915_gpu_error *error,
910 if (i915_reset_in_progress(error)) {
911 /* Non-interruptible callers can't handle -EAGAIN, hence return
912 * -EIO unconditionally for these. */
916 /* Recovery complete, but the reset failed ... */
917 if (i915_terminally_wedged(error))
927 * Compare seqno against outstanding lazy request. Emit a request if they are
931 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
935 DRM_LOCK_ASSERT(ring->dev);
938 if (seqno == ring->outstanding_lazy_request)
939 ret = i915_add_request(ring, NULL);
945 * __wait_seqno - wait until execution of seqno has finished
946 * @ring: the ring expected to report seqno
948 * @reset_counter: reset sequence associated with the given seqno
949 * @interruptible: do an interruptible wait (normally yes)
950 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
952 * Note: It is of utmost importance that the passed in seqno and reset_counter
953 * values have been read by the caller in an smp safe manner. Where read-side
954 * locks are involved, it is sufficient to read the reset_counter before
955 * unlocking the lock that protects the seqno. For lockless tricks, the
956 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
959 * Returns 0 if the seqno was found within the allotted time. Else returns the
960 * errno with remaining time filled in timeout argument.
962 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
963 unsigned reset_counter,
964 bool interruptible, struct timespec *timeout)
966 drm_i915_private_t *dev_priv = ring->dev->dev_private;
967 struct timespec before, now, wait_time={1,0};
968 unsigned long timeout_jiffies;
970 bool wait_forever = true;
973 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
976 if (timeout != NULL) {
977 wait_time = *timeout;
978 wait_forever = false;
981 timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
983 if (WARN_ON(!ring->irq_get(ring)))
986 /* Record current time in case interrupted by signal, or wedged */
987 getrawmonotonic(&before);
990 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
991 i915_reset_in_progress(&dev_priv->gpu_error) || \
992 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
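/* EXIT_COND ends the wait as soon as the seqno has passed, a reset has been
 * flagged, or the reset counter no longer matches the caller's snapshot
 * (i.e. a reset completed while we were waiting). */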
995 end = wait_event_interruptible_timeout(ring->irq_queue,
999 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1002 /* We need to check whether any gpu reset happened in between
1003 * the caller grabbing the seqno and now ... */
1004 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1007 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1009 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1012 } while (end == 0 && wait_forever);
1014 getrawmonotonic(&now);
1016 ring->irq_put(ring);
1020 struct timespec sleep_time = timespec_sub(now, before);
1021 *timeout = timespec_sub(*timeout, sleep_time);
1022 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1023 set_normalized_timespec(timeout, 0, 0);
1028 case -EAGAIN: /* Wedged */
1029 case -ERESTARTSYS: /* Signal */
1031 case 0: /* Timeout */
1032 return -ETIMEDOUT; /* -ETIME on Linux */
1033 default: /* Completed */
1034 WARN_ON(end < 0); /* We're not aware of other errors */
1040 * Waits for a sequence number to be signaled, and cleans up the
1041 * request and object lists appropriately for that event.
1044 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1046 struct drm_device *dev = ring->dev;
1047 struct drm_i915_private *dev_priv = dev->dev_private;
1048 bool interruptible = dev_priv->mm.interruptible;
1051 DRM_LOCK_ASSERT(dev);
1054 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1058 ret = i915_gem_check_olr(ring, seqno);
1062 return __wait_seqno(ring, seqno,
1063 atomic_read(&dev_priv->gpu_error.reset_counter),
1064 interruptible, NULL);
1068 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1069 struct intel_ring_buffer *ring)
1071 i915_gem_retire_requests_ring(ring);
1073 /* Manually manage the write flush as we may have not yet
1074 * retired the buffer.
1076 * Note that the last_write_seqno is always the earlier of
1077 * the two (read/write) seqno, so if we have successfully waited,
1078 * we know we have passed the last write.
1080 obj->last_write_seqno = 0;
1081 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1087 * Ensures that all rendering to the object has completed and the object is
1088 * safe to unbind from the GTT or access from the CPU.
1090 static __must_check int
1091 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1094 struct intel_ring_buffer *ring = obj->ring;
1098 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1102 ret = i915_wait_seqno(ring, seqno);
1106 return i915_gem_object_wait_rendering__tail(obj, ring);
1109 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1110 * as the object state may change during this call.
1112 static __must_check int
1113 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1116 struct drm_device *dev = obj->base.dev;
1117 struct drm_i915_private *dev_priv = dev->dev_private;
1118 struct intel_ring_buffer *ring = obj->ring;
1119 unsigned reset_counter;
1123 DRM_LOCK_ASSERT(dev);
1124 BUG_ON(!dev_priv->mm.interruptible);
1126 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1130 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1134 ret = i915_gem_check_olr(ring, seqno);
1138 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
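/* Snapshot the reset counter, then drop struct_mutex for the wait itself so
 * other ioctls are not blocked; __wait_seqno() compares this snapshot against
 * the live counter to detect a GPU reset that happened while unlocked. */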
1139 mutex_unlock(&dev->struct_mutex);
1140 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1141 mutex_lock(&dev->struct_mutex);
1145 return i915_gem_object_wait_rendering__tail(obj, ring);
1149 * Called when user space prepares to use an object with the CPU, either
1150 * through the mmap ioctl's mapping or a GTT mapping.
1153 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1154 struct drm_file *file)
1156 struct drm_i915_gem_set_domain *args = data;
1157 struct drm_i915_gem_object *obj;
1158 uint32_t read_domains = args->read_domains;
1159 uint32_t write_domain = args->write_domain;
1162 /* Only handle setting domains to types used by the CPU. */
1163 if (write_domain & I915_GEM_GPU_DOMAINS)
1166 if (read_domains & I915_GEM_GPU_DOMAINS)
1169 /* Having something in the write domain implies it's in the read
1170 * domain, and only that read domain. Enforce that in the request.
1172 if (write_domain != 0 && read_domains != write_domain)
1175 ret = i915_mutex_lock_interruptible(dev);
1179 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1180 if (&obj->base == NULL) {
1185 /* Try to flush the object off the GPU without holding the lock.
1186 * We will repeat the flush holding the lock in the normal manner
1187 * to catch cases where we are gazumped.
1189 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1193 if (read_domains & I915_GEM_DOMAIN_GTT) {
1194 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1196 /* Silently promote "you're not bound, there was nothing to do"
1197 * to success, since the client was just asking us to
1198 * make sure everything was done.
1203 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1207 drm_gem_object_unreference(&obj->base);
1209 mutex_unlock(&dev->struct_mutex);
1214 * Called when user space has done writes to this buffer
1217 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1218 struct drm_file *file)
1220 struct drm_i915_gem_sw_finish *args = data;
1221 struct drm_i915_gem_object *obj;
1224 ret = i915_mutex_lock_interruptible(dev);
1227 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1228 if (&obj->base == NULL) {
1233 /* Pinned buffers may be scanout, so flush the cache */
1235 i915_gem_object_flush_cpu_write_domain(obj);
1237 drm_gem_object_unreference(&obj->base);
1239 mutex_unlock(&dev->struct_mutex);
1244 * Maps the contents of an object, returning the address it is mapped
1247 * While the mapping holds a reference on the contents of the object, it doesn't
1248 * imply a ref on the object itself.
1251 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1252 struct drm_file *file)
1254 struct drm_i915_gem_mmap *args = data;
1255 struct drm_gem_object *obj;
1256 struct proc *p = curproc;
1257 vm_map_t map = &p->p_vmspace->vm_map;
1262 obj = drm_gem_object_lookup(dev, file, args->handle);
1266 if (args->size == 0)
1269 size = round_page(args->size);
1270 if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
1276 vm_object_hold(obj->vm_obj);
1277 vm_object_reference_locked(obj->vm_obj);
1278 vm_object_drop(obj->vm_obj);
1279 rv = vm_map_find(map, obj->vm_obj, NULL,
1280 args->offset, &addr, args->size,
1281 PAGE_SIZE, /* align */
1283 VM_MAPTYPE_NORMAL, /* maptype */
1284 VM_PROT_READ | VM_PROT_WRITE, /* prot */
1285 VM_PROT_READ | VM_PROT_WRITE, /* max */
1286 MAP_SHARED /* cow */);
1287 if (rv != KERN_SUCCESS) {
1288 vm_object_deallocate(obj->vm_obj);
1289 error = -vm_mmap_to_errno(rv);
1291 args->addr_ptr = (uint64_t)addr;
1294 drm_gem_object_unreference(obj);
1301 * i915_gem_fault - fault a page into the GTT
1302 * vma: VMA in question
1305 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1306 * from userspace. The fault handler takes care of binding the object to
1307 * the GTT (if needed), allocating and programming a fence register (again,
1308 * only if needed based on whether the old reg is still valid or the object
1309 * is tiled) and inserting a new PTE into the faulting process.
1311 * Note that the faulting process may involve evicting existing objects
1312 * from the GTT and/or fence registers to make room. So performance may
1313 * suffer if the GTT working set is large or there are few fence registers
1317 i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
1320 struct drm_gem_object *gem_obj;
1321 struct drm_i915_gem_object *obj;
1322 struct drm_device *dev;
1323 drm_i915_private_t *dev_priv;
1328 gem_obj = vm_obj->handle;
1329 obj = to_intel_bo(gem_obj);
1330 dev = obj->base.dev;
1331 dev_priv = dev->dev_private;
1333 write = (prot & VM_PROT_WRITE) != 0;
1337 vm_object_pip_add(vm_obj, 1);
1340 * Remove the placeholder page inserted by vm_fault() from the
1341 * object before dropping the object lock. If
1342 * i915_gem_release_mmap() is active in parallel on this gem
1343 * object, then it owns the drm device sx and might find the
1344 * placeholder already. Then, since the page is busy,
1345 * i915_gem_release_mmap() sleeps waiting for the busy state
1346 * of the page to be cleared. We will not be able to acquire the drm
1347 * device lock until i915_gem_release_mmap() is able to make a
1350 if (*mres != NULL) {
1352 vm_page_remove(oldm);
1357 VM_OBJECT_UNLOCK(vm_obj);
1363 ret = i915_mutex_lock_interruptible(dev);
1369 mutex_lock(&dev->struct_mutex);
1372 * Since the object lock was dropped, other thread might have
1373 * faulted on the same GTT address and instantiated the
1374 * mapping for the page. Recheck.
1376 VM_OBJECT_LOCK(vm_obj);
1377 m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
1379 if ((m->flags & PG_BUSY) != 0) {
1380 mutex_unlock(&dev->struct_mutex);
1382 vm_page_sleep(m, "915pee");
1388 VM_OBJECT_UNLOCK(vm_obj);
1390 /* Access to snoopable pages through the GTT is incoherent. */
1391 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1396 /* Now bind it into the GTT if needed */
1397 if (!obj->map_and_fenceable) {
1398 ret = i915_gem_object_unbind(obj);
1404 if (!obj->gtt_space) {
1405 ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
1411 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1418 if (obj->tiling_mode == I915_TILING_NONE)
1419 ret = i915_gem_object_put_fence(obj);
1421 ret = i915_gem_object_get_fence(obj);
1427 if (i915_gem_object_is_inactive(obj))
1428 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1430 obj->fault_mappable = true;
1431 VM_OBJECT_LOCK(vm_obj);
1432 m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
1439 KASSERT((m->flags & PG_FICTITIOUS) != 0,
1440 ("not fictitious %p", m));
1441 KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
1443 if ((m->flags & PG_BUSY) != 0) {
1444 mutex_unlock(&dev->struct_mutex);
1446 vm_page_sleep(m, "915pbs");
1450 m->valid = VM_PAGE_BITS_ALL;
1451 vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
1454 vm_page_busy_try(m, false);
1456 mutex_unlock(&dev->struct_mutex);
1460 vm_object_pip_wakeup(vm_obj);
1461 return (VM_PAGER_OK);
1464 mutex_unlock(&dev->struct_mutex);
1466 KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
1467 if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
1468 goto unlocked_vmobj;
1470 VM_OBJECT_LOCK(vm_obj);
1471 vm_object_pip_wakeup(vm_obj);
1472 return (VM_PAGER_ERROR);
1476 * i915_gem_release_mmap - remove physical page mappings
1477 * @obj: obj in question
1479 * Preserve the reservation of the mmapping with the DRM core code, but
1480 * relinquish ownership of the pages back to the system.
1482 * It is vital that we remove the page mapping if we have mapped a tiled
1483 * object through the GTT and then lose the fence register due to
1484 * resource pressure. Similarly if the object has been moved out of the
1485 * aperture, then pages mapped into userspace must be revoked. Removing the
1486 * mapping will then trigger a page fault on the next user access, allowing
1487 * fixup by i915_gem_fault().
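/* In this port the GTT mmap is backed by a cdev pager object, so revoking the
 * mapping means looking that object up and freeing every resident page
 * (busying each one first); the next user access then refaults through
 * i915_gem_fault(). */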
1490 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1496 if (!obj->fault_mappable)
1499 devobj = cdev_pager_lookup(obj);
1500 if (devobj != NULL) {
1501 page_count = OFF_TO_IDX(obj->base.size);
1503 VM_OBJECT_LOCK(devobj);
1504 for (i = 0; i < page_count; i++) {
1505 m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm");
1508 cdev_pager_free_page(devobj, m);
1510 VM_OBJECT_UNLOCK(devobj);
1511 vm_object_deallocate(devobj);
1514 obj->fault_mappable = false;
1518 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1522 if (INTEL_INFO(dev)->gen >= 4 ||
1523 tiling_mode == I915_TILING_NONE)
1526 /* Previous chips need a power-of-two fence region when tiling */
1527 if (INTEL_INFO(dev)->gen == 3)
1528 gtt_size = 1024*1024;
1530 gtt_size = 512*1024;
1532 while (gtt_size < size)
1539 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1540 * @obj: object to check
1542 * Return the required GTT alignment for an object, taking into account
1543 * potential fence register mapping.
1546 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1547 int tiling_mode, bool fenced)
1551 * Minimum alignment is 4k (GTT page size), but might be greater
1552 * if a fence register is needed for the object.
1554 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1555 tiling_mode == I915_TILING_NONE)
1559 * Previous chips need to be aligned to the size of the smallest
1560 * fence register that can contain the object.
1562 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1566 i915_gem_mmap_gtt(struct drm_file *file,
1567 struct drm_device *dev,
1571 struct drm_i915_private *dev_priv = dev->dev_private;
1572 struct drm_i915_gem_object *obj;
1575 ret = i915_mutex_lock_interruptible(dev);
1579 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1580 if (&obj->base == NULL) {
1585 if (obj->base.size > dev_priv->gtt.mappable_end) {
1590 if (obj->madv != I915_MADV_WILLNEED) {
1591 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1596 ret = drm_gem_create_mmap_offset(&obj->base);
1600 *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1601 DRM_GEM_MAPPING_KEY;
1603 drm_gem_object_unreference(&obj->base);
1605 mutex_unlock(&dev->struct_mutex);
1610 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1612 * @data: GTT mapping ioctl data
1613 * @file: GEM object info
1615 * Simply returns the fake offset to userspace so it can mmap it.
1616 * The mmap call will end up in drm_gem_mmap(), which will set things
1617 * up so we can get faults in the handler above.
1619 * The fault handler will take care of binding the object into the GTT
1620 * (since it may have been evicted to make room for something), allocating
1621 * a fence register, and mapping the appropriate aperture address into
1625 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1626 struct drm_file *file)
1628 struct drm_i915_gem_mmap_gtt *args = data;
1630 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1633 /* Immediately discard the backing storage */
1635 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1639 vm_obj = obj->base.vm_obj;
1640 VM_OBJECT_LOCK(vm_obj);
1641 vm_object_page_remove(vm_obj, 0, 0, false);
1642 VM_OBJECT_UNLOCK(vm_obj);
1643 obj->madv = __I915_MADV_PURGED;
1647 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1649 return obj->madv == I915_MADV_DONTNEED;
1653 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1658 BUG_ON(obj->madv == __I915_MADV_PURGED);
1660 if (obj->tiling_mode != I915_TILING_NONE)
1661 i915_gem_object_save_bit_17_swizzle(obj);
1662 if (obj->madv == I915_MADV_DONTNEED)
1664 page_count = obj->base.size / PAGE_SIZE;
1665 VM_OBJECT_LOCK(obj->base.vm_obj);
1666 #if GEM_PARANOID_CHECK_GTT
1667 i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
1669 for (i = 0; i < page_count; i++) {
1673 if (obj->madv == I915_MADV_WILLNEED)
1674 vm_page_reference(m);
1675 vm_page_busy_wait(obj->pages[i], FALSE, "i915gem");
1676 vm_page_unwire(obj->pages[i], 1);
1677 vm_page_wakeup(obj->pages[i]);
1679 VM_OBJECT_UNLOCK(obj->base.vm_obj);
1681 drm_free(obj->pages, M_DRM);
1686 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1688 const struct drm_i915_gem_object_ops *ops = obj->ops;
1690 if (obj->pages == NULL)
1693 BUG_ON(obj->gtt_space);
1695 if (obj->pages_pin_count)
1698 /* ->put_pages might need to allocate memory for the bit17 swizzle
1699 * array, hence protect them from being reaped by removing them from gtt
1701 list_del(&obj->global_list);
1703 ops->put_pages(obj);
1706 if (i915_gem_object_is_purgeable(obj))
1707 i915_gem_object_truncate(obj);
1713 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1715 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1716 struct drm_device *dev;
1718 int page_count, i, j;
1719 struct vm_page *page;
1721 dev = obj->base.dev;
1722 KASSERT(obj->pages == NULL, ("Obj already has pages"));
1723 page_count = obj->base.size / PAGE_SIZE;
1724 obj->pages = kmalloc(page_count * sizeof(vm_page_t), M_DRM,
1727 vm_obj = obj->base.vm_obj;
1728 VM_OBJECT_LOCK(vm_obj);
1730 for (i = 0; i < page_count; i++) {
1731 page = shmem_read_mapping_page(vm_obj, i);
1733 i915_gem_purge(dev_priv, page_count);
1737 obj->pages[i] = page;
1740 VM_OBJECT_UNLOCK(vm_obj);
1741 if (i915_gem_object_needs_bit17_swizzle(obj))
1742 i915_gem_object_do_bit_17_swizzle(obj);
1747 for (j = 0; j < i; j++) {
1748 page = obj->pages[j];
1749 vm_page_busy_wait(page, FALSE, "i915gem");
1750 vm_page_unwire(page, 0);
1751 vm_page_wakeup(page);
1753 VM_OBJECT_UNLOCK(vm_obj);
1754 drm_free(obj->pages, M_DRM);
1759 /* Ensure that the associated pages are gathered from the backing storage
1760 * and pinned into our object. i915_gem_object_get_pages() may be called
1761 * multiple times before they are released by a single call to
1762 * i915_gem_object_put_pages() - once the pages are no longer referenced
1763 * either as a result of memory pressure (reaping pages under the shrinker)
1764 * or as the object is itself released.
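/* On success the object is also placed on dev_priv->mm.unbound_list; the
 * aperture accounting in i915_gem_get_aperture_ioctl() only walks
 * mm.bound_list, i.e. objects that currently own GTT space. */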
1767 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1769 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1770 const struct drm_i915_gem_object_ops *ops = obj->ops;
1776 if (obj->madv != I915_MADV_WILLNEED) {
1777 DRM_ERROR("Attempting to obtain a purgeable object\n");
1781 BUG_ON(obj->pages_pin_count);
1783 ret = ops->get_pages(obj);
1787 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
1792 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1793 struct intel_ring_buffer *ring)
1795 struct drm_device *dev = obj->base.dev;
1796 struct drm_i915_private *dev_priv = dev->dev_private;
1797 u32 seqno = intel_ring_get_seqno(ring);
1799 BUG_ON(ring == NULL);
1800 if (obj->ring != ring && obj->last_write_seqno) {
1801 /* Keep the seqno relative to the current ring */
1802 obj->last_write_seqno = seqno;
1806 /* Add a reference if we're newly entering the active list. */
1808 drm_gem_object_reference(&obj->base);
1812 /* Move from whatever list we were on to the tail of execution. */
1813 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1814 list_move_tail(&obj->ring_list, &ring->active_list);
1816 obj->last_read_seqno = seqno;
1818 if (obj->fenced_gpu_access) {
1819 obj->last_fenced_seqno = seqno;
1821 /* Bump MRU to take account of the delayed flush */
1822 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1823 struct drm_i915_fence_reg *reg;
1825 reg = &dev_priv->fence_regs[obj->fence_reg];
1826 list_move_tail(®->lru_list,
1827 &dev_priv->mm.fence_list);
1833 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1835 struct drm_device *dev = obj->base.dev;
1836 struct drm_i915_private *dev_priv = dev->dev_private;
1838 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1839 BUG_ON(!obj->active);
1841 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1843 list_del_init(&obj->ring_list);
1846 obj->last_read_seqno = 0;
1847 obj->last_write_seqno = 0;
1848 obj->base.write_domain = 0;
1850 obj->last_fenced_seqno = 0;
1851 obj->fenced_gpu_access = false;
1854 drm_gem_object_unreference(&obj->base);
1856 WARN_ON(i915_verify_lists(dev));
1860 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
1862 struct drm_i915_private *dev_priv = dev->dev_private;
1863 struct intel_ring_buffer *ring;
1866 /* Carefully retire all requests without writing to the rings */
1867 for_each_ring(ring, dev_priv, i) {
1868 ret = intel_ring_idle(ring);
1872 i915_gem_retire_requests(dev);
1874 /* Finally reset hw state */
1875 for_each_ring(ring, dev_priv, i) {
1876 intel_ring_init_seqno(ring, seqno);
1878 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1879 ring->sync_seqno[j] = 0;
1885 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1887 struct drm_i915_private *dev_priv = dev->dev_private;
1893 /* HWS page needs to be set less than what we
1894 * will inject to ring
1896 ret = i915_gem_init_seqno(dev, seqno - 1);
1900 /* Carefully set the last_seqno value so that wrap
1901 * detection still works
1903 dev_priv->next_seqno = seqno;
1904 dev_priv->last_seqno = seqno - 1;
1905 if (dev_priv->last_seqno == 0)
1906 dev_priv->last_seqno--;
1912 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1914 struct drm_i915_private *dev_priv = dev->dev_private;
1916 /* reserve 0 for non-seqno */
1917 if (dev_priv->next_seqno == 0) {
1918 int ret = i915_gem_init_seqno(dev, 0);
1922 dev_priv->next_seqno = 1;
1925 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
1929 int __i915_add_request(struct intel_ring_buffer *ring,
1930 struct drm_file *file,
1931 struct drm_i915_gem_object *obj,
1934 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1935 struct drm_i915_gem_request *request;
1936 u32 request_ring_position, request_start;
1940 request_start = intel_ring_get_tail(ring);
1942 * Emit any outstanding flushes - execbuf can fail to emit the flush
1943 * after having emitted the batchbuffer command. Hence we need to fix
1944 * things up similar to emitting the lazy request. The difference here
1945 * is that the flush _must_ happen before the next request, no matter
1948 ret = intel_ring_flush_all_caches(ring);
1952 request = kmalloc(sizeof(*request), M_DRM, M_WAITOK);
1953 if (request == NULL)
1957 /* Record the position of the start of the request so that
1958 * should we detect the updated seqno part-way through the
1959 * GPU processing the request, we never over-estimate the
1960 * position of the head.
1962 request_ring_position = intel_ring_get_tail(ring);
1964 ret = ring->add_request(ring);
1970 request->seqno = intel_ring_get_seqno(ring);
1971 request->ring = ring;
1972 request->head = request_start;
1973 request->tail = request_ring_position;
1974 request->ctx = ring->last_context;
1975 request->batch_obj = obj;
1977 /* Whilst this request exists, batch_obj will be on the
1978 * active_list, and so will hold the active reference. Only when this
1979 * request is retired will the batch_obj be moved onto the
1980 * inactive_list and lose its active reference. Hence we do not need
1981 * to explicitly hold another reference here.
1985 i915_gem_context_reference(request->ctx);
1987 request->emitted_jiffies = jiffies;
1988 was_empty = list_empty(&ring->request_list);
1989 list_add_tail(&request->list, &ring->request_list);
1990 request->file_priv = NULL;
1993 struct drm_i915_file_private *file_priv = file->driver_priv;
1995 spin_lock(&file_priv->mm.lock);
1996 request->file_priv = file_priv;
1997 list_add_tail(&request->client_list,
1998 &file_priv->mm.request_list);
1999 spin_unlock(&file_priv->mm.lock);
2002 ring->outstanding_lazy_request = 0;
2004 if (!dev_priv->mm.suspended) {
2005 if (i915_enable_hangcheck) {
2006 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2007 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2010 queue_delayed_work(dev_priv->wq,
2011 &dev_priv->mm.retire_work,
2012 round_jiffies_up_relative(hz));
2013 intel_mark_busy(dev_priv->dev);
2018 *out_seqno = request->seqno;
2023 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2025 struct drm_i915_file_private *file_priv = request->file_priv;
2030 spin_lock(&file_priv->mm.lock);
2031 if (request->file_priv) {
2032 list_del(&request->client_list);
2033 request->file_priv = NULL;
2035 spin_unlock(&file_priv->mm.lock);
2038 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
2040 if (acthd >= obj->gtt_offset &&
2041 acthd < obj->gtt_offset + obj->base.size)
2047 static bool i915_head_inside_request(const u32 acthd_unmasked,
2048 const u32 request_start,
2049 const u32 request_end)
2051 const u32 acthd = acthd_unmasked & HEAD_ADDR;
2053 if (request_start < request_end) {
2054 if (acthd >= request_start && acthd < request_end)
2056 } else if (request_start > request_end) {
2057 if (acthd >= request_start || acthd < request_end)
2064 static bool i915_request_guilty(struct drm_i915_gem_request *request,
2065 const u32 acthd, bool *inside)
2067 /* There is a possibility that the unmasked head address, pointing
2068 * inside the ring, matches the batch_obj address range.
2069 * However this is extremely unlikely.
2072 if (request->batch_obj) {
2073 if (i915_head_inside_object(acthd, request->batch_obj)) {
2079 if (i915_head_inside_request(acthd, request->head, request->tail)) {
2087 static void i915_set_reset_status(struct intel_ring_buffer *ring,
2088 struct drm_i915_gem_request *request,
2091 struct i915_ctx_hang_stats *hs = NULL;
2092 bool inside, guilty;
2094 /* Innocent until proven guilty */
2097 if (ring->hangcheck.action != wait &&
2098 i915_request_guilty(request, acthd, &inside)) {
2099 DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
2101 inside ? "inside" : "flushing",
2102 request->batch_obj ?
2103 request->batch_obj->gtt_offset : 0,
2104 request->ctx ? request->ctx->id : 0,
2110 /* If contexts are disabled or this is the default context, use
2111 * file_priv->reset_state
2113 if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2114 hs = &request->ctx->hang_stats;
2115 else if (request->file_priv)
2116 hs = &request->file_priv->hang_stats;
2122 hs->batch_pending++;
2126 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2128 list_del(&request->list);
2129 i915_gem_request_remove_from_client(request);
2132 i915_gem_context_unreference(request->ctx);
2137 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2138 struct intel_ring_buffer *ring)
2140 u32 completed_seqno;
2143 acthd = intel_ring_get_active_head(ring);
2144 completed_seqno = ring->get_seqno(ring, false);
2146 while (!list_empty(&ring->request_list)) {
2147 struct drm_i915_gem_request *request;
2149 request = list_first_entry(&ring->request_list,
2150 struct drm_i915_gem_request,
2153 if (request->seqno > completed_seqno)
2154 i915_set_reset_status(ring, request, acthd);
2156 i915_gem_free_request(request);
2159 while (!list_empty(&ring->active_list)) {
2160 struct drm_i915_gem_object *obj;
2162 obj = list_first_entry(&ring->active_list,
2163 struct drm_i915_gem_object,
2166 i915_gem_object_move_to_inactive(obj);
2170 void i915_gem_restore_fences(struct drm_device *dev)
2172 struct drm_i915_private *dev_priv = dev->dev_private;
2175 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2176 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2179 * Commit delayed tiling changes if we have an object still
2180 * attached to the fence, otherwise just clear the fence.
2183 i915_gem_object_update_fence(reg->obj, reg,
2184 reg->obj->tiling_mode);
2186 i915_gem_write_fence(dev, i, NULL);
2191 void i915_gem_reset(struct drm_device *dev)
2193 struct drm_i915_private *dev_priv = dev->dev_private;
2194 struct drm_i915_gem_object *obj;
2195 struct intel_ring_buffer *ring;
2198 for_each_ring(ring, dev_priv, i)
2199 i915_gem_reset_ring_lists(dev_priv, ring);
2201 /* Move everything out of the GPU domains to ensure we do any
2202 * necessary invalidation upon reuse.
2204 list_for_each_entry(obj,
2205 &dev_priv->mm.inactive_list,
2208 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2211 i915_gem_restore_fences(dev);
2215 * This function clears the request list as sequence numbers are passed.
2218 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2222 if (list_empty(&ring->request_list))
2225 WARN_ON(i915_verify_lists(ring->dev));
2227 seqno = ring->get_seqno(ring, true);
2229 while (!list_empty(&ring->request_list)) {
2230 struct drm_i915_gem_request *request;
2232 request = list_first_entry(&ring->request_list,
2233 struct drm_i915_gem_request,
2236 if (!i915_seqno_passed(seqno, request->seqno))
2239 /* We know the GPU must have read the request to have
2240 * sent us the seqno + interrupt, so use the position
2241 * of tail of the request to update the last known position
2244 ring->last_retired_head = request->tail;
2246 i915_gem_free_request(request);
2249 /* Move any buffers on the active list that are no longer referenced
2250 * by the ringbuffer to the flushing/inactive lists as appropriate.
2252 while (!list_empty(&ring->active_list)) {
2253 struct drm_i915_gem_object *obj;
2255 obj = list_first_entry(&ring->active_list,
2256 struct drm_i915_gem_object,
2259 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2262 i915_gem_object_move_to_inactive(obj);
2265 if (unlikely(ring->trace_irq_seqno &&
2266 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2267 ring->irq_put(ring);
2268 ring->trace_irq_seqno = 0;
2274 i915_gem_retire_requests(struct drm_device *dev)
2276 drm_i915_private_t *dev_priv = dev->dev_private;
2277 struct intel_ring_buffer *ring;
2280 for_each_ring(ring, dev_priv, i)
2281 i915_gem_retire_requests_ring(ring);
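/* __i915_gem_shrink reclaims backing pages in two passes: first from unbound
 * objects, where dropping the pages is cheap, then from inactive bound
 * objects, which have to be unbound from the GTT first.  With purgeable_only
 * set it only touches objects marked I915_MADV_DONTNEED by userspace. */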
2285 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
2286 bool purgeable_only)
2288 struct drm_i915_gem_object *obj, *next;
2291 list_for_each_entry_safe(obj, next,
2292 &dev_priv->mm.unbound_list,
2295 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2296 i915_gem_object_put_pages(obj) == 0) {
2297 count += obj->base.size >> PAGE_SHIFT;
2298 if (count >= target)
2304 list_for_each_entry_safe(obj, next,
2305 &dev_priv->mm.inactive_list,
2308 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
2309 i915_gem_object_unbind(obj) == 0 &&
2310 i915_gem_object_put_pages(obj) == 0) {
2311 count += obj->base.size >> PAGE_SHIFT;
2312 if (count >= target)
2322 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
2324 return __i915_gem_shrink(dev_priv, target, true);
2328 i915_gem_retire_work_handler(struct work_struct *work)
2330 drm_i915_private_t *dev_priv;
2331 struct drm_device *dev;
2332 struct intel_ring_buffer *ring;
2336 dev_priv = container_of(work, drm_i915_private_t,
2337 mm.retire_work.work);
2338 dev = dev_priv->dev;
2340 /* Come back later if the device is busy... */
2341 if (lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_NOWAIT)) {
2342 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2343 round_jiffies_up_relative(hz));
2347 i915_gem_retire_requests(dev);
2349 /* Send a periodic flush down the ring so we don't hold onto GEM
2350 * objects indefinitely.
2353 for_each_ring(ring, dev_priv, i) {
2354 if (ring->gpu_caches_dirty)
2355 i915_add_request(ring, NULL);
2357 idle &= list_empty(&ring->request_list);
2360 if (!dev_priv->mm.suspended && !idle)
2361 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2362 round_jiffies_up_relative(hz));
2364 intel_mark_idle(dev);
2366 mutex_unlock(&dev->struct_mutex);
2369 * Ensures that an object will eventually get non-busy by flushing any required
2370 * write domains, emitting any outstanding lazy request and retiring any
2371 * completed requests.
2374 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2379 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2383 i915_gem_retire_requests_ring(obj->ring);
2390 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2391 * @DRM_IOCTL_ARGS: standard ioctl arguments
2393 * Returns 0 if successful, else an error is returned with the remaining time in
2394 * the timeout parameter.
2395 * -ETIMEDOUT (-ETIME on Linux): object is still busy after timeout
2396 * -ERESTARTSYS: signal interrupted the wait
2397 * -ENOENT: object doesn't exist
2398 * Also possible, but rare:
2399 * -EAGAIN: GPU wedged
2401 * -ENODEV: Internal IRQ fail
2402 * -E?: The add request failed
2404 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2405 * non-zero timeout parameter the wait ioctl will wait for the given number of
2406 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2407 * without holding struct_mutex the object may become re-busied before this
2408 * function completes. A similar but shorter race condition exists in the busy
2412 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2414 drm_i915_private_t *dev_priv = dev->dev_private;
2415 struct drm_i915_gem_wait *args = data;
2416 struct drm_i915_gem_object *obj;
2417 struct intel_ring_buffer *ring = NULL;
2418 struct timespec timeout_stack, *timeout = NULL;
2419 unsigned reset_counter;
2423 if (args->timeout_ns >= 0) {
2424 timeout_stack = ns_to_timespec(args->timeout_ns);
2425 timeout = &timeout_stack;
2428 ret = i915_mutex_lock_interruptible(dev);
2432 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2433 if (&obj->base == NULL) {
2434 mutex_unlock(&dev->struct_mutex);
2438 /* Need to make sure the object gets inactive eventually. */
2439 ret = i915_gem_object_flush_active(obj);
2444 seqno = obj->last_read_seqno;
2451 /* Do this after OLR check to make sure we make forward progress polling
2452 * on this IOCTL with a 0 timeout (like busy ioctl)
2454 if (!args->timeout_ns) {
2459 drm_gem_object_unreference(&obj->base);
2460 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2461 mutex_unlock(&dev->struct_mutex);
2463 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2465 args->timeout_ns = timespec_to_ns(timeout);
2469 drm_gem_object_unreference(&obj->base);
2470 mutex_unlock(&dev->struct_mutex);
2475 * i915_gem_object_sync - sync an object to a ring.
2477 * @obj: object which may be in use on another ring.
2478 * @to: ring we wish to use the object on. May be NULL.
2480 * This code is meant to abstract object synchronization with the GPU.
2481 * Calling with NULL implies synchronizing the object with the CPU
2482 * rather than a particular GPU ring.
2484 * Returns 0 if successful, else propagates up the lower layer error.
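/* When hardware semaphores are enabled the sync is queued on the GPU via
 * to->sync_to(), so the CPU does not stall; otherwise (or when syncing to the
 * CPU with to == NULL) we simply wait for rendering to complete. */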
2487 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2488 struct intel_ring_buffer *to)
2490 struct intel_ring_buffer *from = obj->ring;
2494 if (from == NULL || to == from)
2497 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2498 return i915_gem_object_wait_rendering(obj, false);
2500 idx = intel_ring_sync_index(from, to);
2502 seqno = obj->last_read_seqno;
2503 if (seqno <= from->sync_seqno[idx])
2506 ret = i915_gem_check_olr(obj->ring, seqno);
2510 ret = to->sync_to(to, from, seqno);
2512 /* We use last_read_seqno because sync_to()
2513 * might have just caused seqno wrap under
2516 from->sync_seqno[idx] = obj->last_read_seqno;
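/*
 * Drop the object's GTT domains before it leaves the aperture: release any
 * userspace mmap so the next access faults, then clear the GTT read/write
 * domain bits so domain tracking restarts cleanly on rebind.
 */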
2521 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2523 u32 old_write_domain, old_read_domains;
2525 /* Force a pagefault for domain tracking on next user access */
2526 i915_gem_release_mmap(obj);
2528 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2531 /* Wait for any direct GTT access to complete */
2534 old_read_domains = obj->base.read_domains;
2535 old_write_domain = obj->base.write_domain;
2537 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2538 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2543 * Unbinds an object from the GTT aperture.
2546 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2548 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2551 if (obj->gtt_space == NULL)
2557 BUG_ON(obj->pages == NULL);
2559 ret = i915_gem_object_finish_gpu(obj);
2562 /* Continue on if we fail due to EIO, the GPU is hung so we
2563 * should be safe and we need to cleanup or else we might
2564 * cause memory corruption through use-after-free.
2567 i915_gem_object_finish_gtt(obj);
2569 /* Move the object to the CPU domain to ensure that
2570 * any possible CPU writes while it's not in the GTT
2571 * are flushed when we go to remap it.
2574 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2575 if (ret == -ERESTARTSYS)
2578 /* In the event of a disaster, abandon all caches and
2579 * hope for the best.
2581 i915_gem_clflush_object(obj);
2582 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2585 /* release the fence reg _after_ flushing */
2586 ret = i915_gem_object_put_fence(obj);
2590 if (obj->has_global_gtt_mapping)
2591 i915_gem_gtt_unbind_object(obj);
2592 if (obj->has_aliasing_ppgtt_mapping) {
2593 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2594 obj->has_aliasing_ppgtt_mapping = 0;
2596 i915_gem_gtt_finish_object(obj);
2598 i915_gem_object_put_pages_gtt(obj);
2600 list_del_init(&obj->global_list);
2601 list_del_init(&obj->mm_list);
2602 /* Avoid an unnecessary call to unbind on rebind. */
2603 obj->map_and_fenceable = true;
2605 drm_mm_put_block(obj->gtt_space);
2606 obj->gtt_space = NULL;
2607 obj->gtt_offset = 0;
2609 if (i915_gem_object_is_purgeable(obj))
2610 i915_gem_object_truncate(obj);
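/*
 * Wait for the whole GPU to go idle: switch each ring back to the default
 * context and wait for every ring to drain its outstanding requests.
 */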
2615 int i915_gpu_idle(struct drm_device *dev)
2617 drm_i915_private_t *dev_priv = dev->dev_private;
2618 struct intel_ring_buffer *ring;
2621 /* Flush everything onto the inactive list. */
2622 for_each_ring(ring, dev_priv, i) {
2623 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2627 ret = intel_ring_idle(ring);
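/*
 * Program (or clear) a 64-bit gen4+ fence register.  The two 32-bit halves
 * are written separately, so the register is disabled first and only marked
 * valid again as the final step.
 */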
2635 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2636 struct drm_i915_gem_object *obj)
2638 drm_i915_private_t *dev_priv = dev->dev_private;
2640 int fence_pitch_shift;
2642 if (INTEL_INFO(dev)->gen >= 6) {
2643 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2644 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2646 fence_reg = FENCE_REG_965_0;
2647 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2650 fence_reg += reg * 8;
2652 /* To w/a incoherency with non-atomic 64-bit register updates,
2653 * we split the 64-bit update into two 32-bit writes. In order
2654 * for a partial fence not to be evaluated between writes, we
2655 * precede the update with write to turn off the fence register,
2656 * and only enable the fence as the last step.
2658 * For extra levels of paranoia, we make sure each step lands
2659 * before applying the next step.
2661 I915_WRITE(fence_reg, 0);
2662 POSTING_READ(fence_reg);
2665 u32 size = obj->gtt_space->size;
2668 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2670 val |= obj->gtt_offset & 0xfffff000;
2671 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2672 if (obj->tiling_mode == I915_TILING_Y)
2673 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2674 val |= I965_FENCE_REG_VALID;
2676 I915_WRITE(fence_reg + 4, val >> 32);
2677 POSTING_READ(fence_reg + 4);
2679 I915_WRITE(fence_reg + 0, val);
2680 POSTING_READ(fence_reg);
2682 I915_WRITE(fence_reg + 4, 0);
2683 POSTING_READ(fence_reg + 4);
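/* Program (or clear) a gen3-style fence register for a tiled object. */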
2687 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2688 struct drm_i915_gem_object *obj)
2690 drm_i915_private_t *dev_priv = dev->dev_private;
2694 u32 size = obj->gtt_space->size;
2698 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2699 (size & -size) != size ||
2700 (obj->gtt_offset & (size - 1)),
2701 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2702 obj->gtt_offset, obj->map_and_fenceable, size);
2704 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2709 /* Note: pitch better be a power of two tile widths */
2710 pitch_val = obj->stride / tile_width;
2711 pitch_val = ffs(pitch_val) - 1;
2713 val = obj->gtt_offset;
2714 if (obj->tiling_mode == I915_TILING_Y)
2715 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2716 val |= I915_FENCE_SIZE_BITS(size);
2717 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2718 val |= I830_FENCE_REG_VALID;
2723 reg = FENCE_REG_830_0 + reg * 4;
2725 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2727 I915_WRITE(reg, val);
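/* Program (or clear) a gen2 (i830-style) fence register. */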
2731 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2732 struct drm_i915_gem_object *obj)
2734 drm_i915_private_t *dev_priv = dev->dev_private;
2738 u32 size = obj->gtt_space->size;
2741 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2742 (size & -size) != size ||
2743 (obj->gtt_offset & (size - 1)),
2744 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2745 obj->gtt_offset, size);
2747 pitch_val = obj->stride / 128;
2748 pitch_val = ffs(pitch_val) - 1;
2750 val = obj->gtt_offset;
2751 if (obj->tiling_mode == I915_TILING_Y)
2752 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2753 val |= I830_FENCE_SIZE_BITS(size);
2754 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2755 val |= I830_FENCE_REG_VALID;
2759 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2760 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2763 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2765 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
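/*
 * Write a fence register for the given slot, dispatching to the
 * generation-specific helper and ordering the update with memory barriers
 * when the old or new object has been accessed through the GTT.
 */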
2768 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2769 struct drm_i915_gem_object *obj)
2771 struct drm_i915_private *dev_priv = dev->dev_private;
2773 /* Ensure that all CPU reads are completed before installing a fence
2774 * and all writes before removing the fence.
2776 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2779 WARN(obj && (!obj->stride || !obj->tiling_mode),
2780 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2781 obj->stride, obj->tiling_mode);
2783 switch (INTEL_INFO(dev)->gen) {
2787 case 4: i965_write_fence_reg(dev, reg, obj); break;
2788 case 3: i915_write_fence_reg(dev, reg, obj); break;
2789 case 2: i830_write_fence_reg(dev, reg, obj); break;
2793 /* And similarly be paranoid that no direct access to this region
2794 * is reordered to before the fence is installed.
2796 if (i915_gem_object_needs_mb(obj))
2800 static inline int fence_number(struct drm_i915_private *dev_priv,
2801 struct drm_i915_fence_reg *fence)
2803 return fence - dev_priv->fence_regs;
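/*
 * Point a fence register at @obj (or detach it), updating the register
 * itself as well as the object's fence bookkeeping and the fence LRU.
 */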
2806 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2807 struct drm_i915_fence_reg *fence,
2810 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2811 int reg = fence_number(dev_priv, fence);
2813 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2816 obj->fence_reg = reg;
2818 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2820 obj->fence_reg = I915_FENCE_REG_NONE;
2822 list_del_init(&fence->lru_list);
2824 obj->fence_dirty = false;
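/* Wait for any outstanding fenced GPU access to the object to complete. */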
2828 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2830 if (obj->last_fenced_seqno) {
2831 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2835 obj->last_fenced_seqno = 0;
2838 obj->fenced_gpu_access = false;
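/* Release the fence register backing @obj, waiting for fenced access first. */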
2843 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2845 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2846 struct drm_i915_fence_reg *fence;
2849 ret = i915_gem_object_wait_fence(obj);
2853 if (obj->fence_reg == I915_FENCE_REG_NONE)
2856 fence = &dev_priv->fence_regs[obj->fence_reg];
2858 i915_gem_object_fence_lost(obj);
2859 i915_gem_object_update_fence(obj, fence, false);
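/*
 * Find a fence register for use: prefer a completely free one, otherwise
 * steal the least-recently-used register that is not pinned.
 */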
2864 static struct drm_i915_fence_reg *
2865 i915_find_fence_reg(struct drm_device *dev)
2867 struct drm_i915_private *dev_priv = dev->dev_private;
2868 struct drm_i915_fence_reg *reg, *avail;
2871 /* First try to find a free reg */
2873 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2874 reg = &dev_priv->fence_regs[i];
2878 if (!reg->pin_count)
2885 /* None available, try to steal one or wait for a user to finish */
2886 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2897 * i915_gem_object_get_fence - set up fencing for an object
2898 * @obj: object to map through a fence reg
2900 * When mapping objects through the GTT, userspace wants to be able to write
2901 * to them without having to worry about swizzling if the object is tiled.
2902 * This function walks the fence regs looking for a free one for @obj,
2903 * stealing one if it can't find any.
2905 * It then sets up the reg based on the object's properties: address, pitch
2906 * and tiling format.
2908 * For an untiled surface, this removes any existing fence.
2911 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2913 struct drm_device *dev = obj->base.dev;
2914 struct drm_i915_private *dev_priv = dev->dev_private;
2915 bool enable = obj->tiling_mode != I915_TILING_NONE;
2916 struct drm_i915_fence_reg *reg;
2919 /* Have we updated the tiling parameters upon the object and so
2920 * will need to serialise the write to the associated fence register?
2922 if (obj->fence_dirty) {
2923 ret = i915_gem_object_wait_fence(obj);
2928 /* Just update our place in the LRU if our fence is getting reused. */
2929 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2930 reg = &dev_priv->fence_regs[obj->fence_reg];
2931 if (!obj->fence_dirty) {
2932 			list_move_tail(&reg->lru_list,
2933 &dev_priv->mm.fence_list);
2936 } else if (enable) {
2937 reg = i915_find_fence_reg(dev);
2942 struct drm_i915_gem_object *old = reg->obj;
2944 ret = i915_gem_object_wait_fence(old);
2948 i915_gem_object_fence_lost(old);
2953 i915_gem_object_update_fence(obj, reg, enable);
2958 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2959 struct drm_mm_node *gtt_space,
2960 unsigned long cache_level)
2962 struct drm_mm_node *other;
2964 /* On non-LLC machines we have to be careful when putting differing
2965 * types of snoopable memory together to avoid the prefetcher
2966 * crossing memory domains and dying.
2971 if (gtt_space == NULL)
2974 	if (list_empty(&gtt_space->node_list))
2977 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2978 if (other->allocated && !other->hole_follows && other->color != cache_level)
2981 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2982 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
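/*
 * Debug helper: walk all bound objects and verify that each one has a GTT
 * node and that the node's colour matches the object's cache level.
 */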
2988 static void i915_gem_verify_gtt(struct drm_device *dev)
2991 struct drm_i915_private *dev_priv = dev->dev_private;
2992 struct drm_i915_gem_object *obj;
2995 list_for_each_entry(obj, &dev_priv->mm.global_list, global_list) {
2996 if (obj->gtt_space == NULL) {
2997 printk(KERN_ERR "object found on GTT list with no space reserved\n");
3002 if (obj->cache_level != obj->gtt_space->color) {
3003 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3004 obj->gtt_space->start,
3005 obj->gtt_space->start + obj->gtt_space->size,
3007 obj->gtt_space->color);
3012 if (!i915_gem_valid_gtt_space(dev,
3014 obj->cache_level)) {
3015 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3016 obj->gtt_space->start,
3017 obj->gtt_space->start + obj->gtt_space->size,
3029 * Finds free space in the GTT aperture and binds the object there.
3032 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
3034 bool map_and_fenceable,
3037 struct drm_device *dev = obj->base.dev;
3038 drm_i915_private_t *dev_priv = dev->dev_private;
3039 struct drm_mm_node *node;
3040 u32 size, fence_size, fence_alignment, unfenced_alignment;
3041 bool mappable, fenceable;
3042 size_t gtt_max = map_and_fenceable ?
3043 dev_priv->gtt.mappable_end : dev_priv->gtt.total;
3046 fence_size = i915_gem_get_gtt_size(dev,
3049 fence_alignment = i915_gem_get_gtt_alignment(dev,
3051 obj->tiling_mode, true);
3052 unfenced_alignment =
3053 i915_gem_get_gtt_alignment(dev,
3055 obj->tiling_mode, false);
3058 alignment = map_and_fenceable ? fence_alignment :
3060 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3061 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3065 size = map_and_fenceable ? fence_size : obj->base.size;
3067 /* If the object is bigger than the entire aperture, reject it early
3068 * before evicting everything in a vain attempt to find space.
3070 if (obj->base.size > gtt_max) {
3071 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3073 map_and_fenceable ? "mappable" : "total",
3079 if (map_and_fenceable)
3080 node = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
3081 size, alignment, obj->cache_level,
3082 0, dev_priv->gtt.mappable_end,
3085 node = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
3086 size, alignment, obj->cache_level,
3089 if (map_and_fenceable)
3091 drm_mm_get_block_range_generic(node,
3092 size, alignment, obj->cache_level,
3093 0, dev_priv->gtt.mappable_end,
3097 drm_mm_get_block_generic(node,
3098 size, alignment, obj->cache_level,
3101 if (obj->gtt_space == NULL) {
3102 ret = i915_gem_evict_something(dev, size, alignment,
3113 * NOTE: i915_gem_object_get_pages_gtt() cannot
3114 * return ENOMEM, since we used VM_ALLOC_RETRY.
3116 ret = i915_gem_object_get_pages_gtt(obj);
3118 drm_mm_put_block(obj->gtt_space);
3119 obj->gtt_space = NULL;
3123 i915_gem_gtt_bind_object(obj, obj->cache_level);
3125 i915_gem_object_put_pages_gtt(obj);
3126 drm_mm_put_block(obj->gtt_space);
3127 obj->gtt_space = NULL;
3128 if (i915_gem_evict_everything(dev))
3133 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
3134 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3136 obj->gtt_offset = obj->gtt_space->start;
3139 obj->gtt_space->size == fence_size &&
3140 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
3143 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
3145 obj->map_and_fenceable = mappable && fenceable;
3147 trace_i915_gem_object_bind(obj, map_and_fenceable);
3148 i915_gem_verify_gtt(dev);
3153 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3156 /* If we don't have a page list set up, then we're not pinned
3157 * to GPU, and we can ignore the cache flush because it'll happen
3158 * again at bind time.
3160 if (obj->pages == NULL)
3164 * Stolen memory is always coherent with the GPU as it is explicitly
3165 * marked as wc by the system, or the system is cache-coherent.
3170 /* If the GPU is snooping the contents of the CPU cache,
3171 * we do not need to manually clear the CPU cache lines. However,
3172 * the caches are only snooped when the render cache is
3173 * flushed/invalidated. As we always have to emit invalidations
3174 * and flushes when moving into and out of the RENDER domain, correct
3175 * snooping behaviour occurs naturally as the result of our domain
3178 if (obj->cache_level != I915_CACHE_NONE)
3181 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
3184 /** Flushes the GTT write domain for the object if it's dirty. */
3186 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3188 uint32_t old_write_domain;
3190 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3193 /* No actual flushing is required for the GTT write domain. Writes
3194 * to it immediately go to main memory as far as we know, so there's
3195 * no chipset flush. It also doesn't land in render cache.
3197 * However, we do have to enforce the order so that all writes through
3198 	 * the GTT land before any writes to the device, such as updates to
3199 	 * the GATT itself.
3203 old_write_domain = obj->base.write_domain;
3204 obj->base.write_domain = 0;
3207 /** Flushes the CPU write domain for the object if it's dirty. */
3209 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3211 uint32_t old_write_domain;
3213 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3216 i915_gem_clflush_object(obj);
3217 i915_gem_chipset_flush(obj->base.dev);
3218 old_write_domain = obj->base.write_domain;
3219 obj->base.write_domain = 0;
3223 * Moves a single object to the GTT read, and possibly write domain.
3225 * This function returns when the move is complete, including waiting on
3229 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3231 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3232 uint32_t old_write_domain, old_read_domains;
3235 /* Not valid to be called on unbound objects. */
3236 if (obj->gtt_space == NULL)
3239 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3242 ret = i915_gem_object_wait_rendering(obj, !write);
3246 i915_gem_object_flush_cpu_write_domain(obj);
3248 /* Serialise direct access to this object with the barriers for
3249 * coherent writes from the GPU, by effectively invalidating the
3250 * GTT domain upon first access.
3252 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3255 old_write_domain = obj->base.write_domain;
3256 old_read_domains = obj->base.read_domains;
3258 /* It should now be out of any other write domains, and we can update
3259 * the domain values for our changes.
3261 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3262 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3264 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3265 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3269 /* And bump the LRU for this access */
3270 if (i915_gem_object_is_inactive(obj))
3271 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
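/*
 * Change the cache level used for an object's GTT/PPGTT mappings,
 * unbinding it first if the new level would clash with neighbouring
 * objects, and rewriting any existing mappings in place otherwise.
 */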
3276 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3277 enum i915_cache_level cache_level)
3279 struct drm_device *dev = obj->base.dev;
3280 drm_i915_private_t *dev_priv = dev->dev_private;
3283 if (obj->cache_level == cache_level)
3286 if (obj->pin_count) {
3287 DRM_DEBUG("can not change the cache level of pinned objects\n");
3291 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3292 ret = i915_gem_object_unbind(obj);
3297 if (obj->gtt_space) {
3298 ret = i915_gem_object_finish_gpu(obj);
3302 i915_gem_object_finish_gtt(obj);
3304 /* Before SandyBridge, you could not use tiling or fence
3305 * registers with snooped memory, so relinquish any fences
3306 * currently pointing to our region in the aperture.
3308 if (INTEL_INFO(dev)->gen < 6) {
3309 ret = i915_gem_object_put_fence(obj);
3314 if (obj->has_global_gtt_mapping)
3315 i915_gem_gtt_bind_object(obj, cache_level);
3316 if (obj->has_aliasing_ppgtt_mapping)
3317 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3320 obj->gtt_space->color = cache_level;
3323 if (cache_level == I915_CACHE_NONE) {
3324 u32 old_read_domains, old_write_domain;
3326 /* If we're coming from LLC cached, then we haven't
3327 * actually been tracking whether the data is in the
3328 * CPU cache or not, since we only allow one bit set
3329 * in obj->write_domain and have been skipping the clflushes.
3330 * Just set it to the CPU cache for now.
3332 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3333 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3335 old_read_domains = obj->base.read_domains;
3336 old_write_domain = obj->base.write_domain;
3338 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3339 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3343 obj->cache_level = cache_level;
3344 i915_gem_verify_gtt(dev);
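/* Report to userspace whether the object is using a cached (non-UC) mode. */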
3348 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3349 struct drm_file *file)
3351 struct drm_i915_gem_caching *args = data;
3352 struct drm_i915_gem_object *obj;
3355 ret = i915_mutex_lock_interruptible(dev);
3359 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3360 if (&obj->base == NULL) {
3365 args->caching = obj->cache_level != I915_CACHE_NONE;
3367 drm_gem_object_unreference(&obj->base);
3369 mutex_unlock(&dev->struct_mutex);
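/*
 * Set an object's caching mode on behalf of userspace, translating the
 * requested I915_CACHING_* value into a cache level.
 */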
3373 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3374 struct drm_file *file)
3376 struct drm_i915_gem_caching *args = data;
3377 struct drm_i915_gem_object *obj;
3378 enum i915_cache_level level;
3381 switch (args->caching) {
3382 case I915_CACHING_NONE:
3383 level = I915_CACHE_NONE;
3385 case I915_CACHING_CACHED:
3386 level = I915_CACHE_LLC;
3392 ret = i915_mutex_lock_interruptible(dev);
3396 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3397 if (&obj->base == NULL) {
3402 ret = i915_gem_object_set_cache_level(obj, level);
3404 drm_gem_object_unreference(&obj->base);
3406 mutex_unlock(&dev->struct_mutex);
3411 * Prepare buffer for display plane (scanout, cursors, etc).
3412 * Can be called from an uninterruptible phase (modesetting) and allows
3413 * any flushes to be pipelined (for pageflips).
3416 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3418 struct intel_ring_buffer *pipelined)
3420 u32 old_read_domains, old_write_domain;
3423 if (pipelined != obj->ring) {
3424 ret = i915_gem_object_sync(obj, pipelined);
3429 /* The display engine is not coherent with the LLC cache on gen6. As
3430 * a result, we make sure that the pinning that is about to occur is
3431 	 * done with uncached PTEs. This is lowest common denominator for all
3432 	 * chipsets.
3434 * However for gen6+, we could do better by using the GFDT bit instead
3435 * of uncaching, which would allow us to flush all the LLC-cached data
3436 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3438 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3442 /* As the user may map the buffer once pinned in the display plane
3443 * (e.g. libkms for the bootup splash), we have to ensure that we
3444 * always use map_and_fenceable for all scanout buffers.
3446 ret = i915_gem_object_pin(obj, alignment, true, false);
3450 i915_gem_object_flush_cpu_write_domain(obj);
3452 old_write_domain = obj->base.write_domain;
3453 old_read_domains = obj->base.read_domains;
3455 /* It should now be out of any other write domains, and we can update
3456 * the domain values for our changes.
3458 obj->base.write_domain = 0;
3459 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
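/*
 * Wait for all rendering to the object to complete and drop its GPU read
 * domains, so the GPU's caches and TLBs are invalidated on next use.
 */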
3465 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3469 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3472 ret = i915_gem_object_wait_rendering(obj, false);
3476 /* Ensure that we invalidate the GPU's caches and TLBs. */
3477 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3482 * Moves a single object to the CPU read, and possibly write domain.
3484 * This function returns when the move is complete, including waiting on
3488 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3490 uint32_t old_write_domain, old_read_domains;
3493 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3496 ret = i915_gem_object_wait_rendering(obj, !write);
3500 i915_gem_object_flush_gtt_write_domain(obj);
3502 old_write_domain = obj->base.write_domain;
3503 old_read_domains = obj->base.read_domains;
3505 /* Flush the CPU cache if it's still invalid. */
3506 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3507 i915_gem_clflush_object(obj);
3509 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3512 /* It should now be out of any other write domains, and we can update
3513 * the domain values for our changes.
3515 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3517 /* If we're writing through the CPU, then the GPU read domains will
3518 * need to be invalidated at next use.
3521 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3522 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3528 /* Throttle our rendering by waiting until the ring has completed our requests
3529 * emitted over 20 msec ago.
3531 * Note that if we were to use the current jiffies each time around the loop,
3532 * we wouldn't escape the function with any frames outstanding if the time to
3533 * render a frame was over 20ms.
3535 * This should get us reasonable parallelism between CPU and GPU but also
3536 * relatively low latency when blocking on a particular request to finish.
3539 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3541 struct drm_i915_private *dev_priv = dev->dev_private;
3542 struct drm_i915_file_private *file_priv = file->driver_priv;
3543 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3544 struct drm_i915_gem_request *request;
3545 struct intel_ring_buffer *ring = NULL;
3546 unsigned reset_counter;
3550 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3554 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3558 spin_lock(&file_priv->mm.lock);
3559 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3560 if (time_after_eq(request->emitted_jiffies, recent_enough))
3563 ring = request->ring;
3564 seqno = request->seqno;
3566 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3567 spin_unlock(&file_priv->mm.lock);
3572 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3574 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
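/*
 * Pin an object into the GTT, binding it first if necessary.  An existing
 * binding that does not satisfy the requested alignment or mappability is
 * dropped and redone.
 */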
3580 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3582 bool map_and_fenceable,
3587 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3590 if (obj->gtt_space != NULL) {
3591 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3592 (map_and_fenceable && !obj->map_and_fenceable)) {
3593 WARN(obj->pin_count,
3594 "bo is already pinned with incorrect alignment:"
3595 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3596 " obj->map_and_fenceable=%d\n",
3597 obj->gtt_offset, alignment,
3599 obj->map_and_fenceable);
3600 ret = i915_gem_object_unbind(obj);
3606 if (obj->gtt_space == NULL) {
3607 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3609 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3615 if (!dev_priv->mm.aliasing_ppgtt)
3616 i915_gem_gtt_bind_object(obj, obj->cache_level);
3619 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3620 i915_gem_gtt_bind_object(obj, obj->cache_level);
3623 obj->pin_mappable |= map_and_fenceable;
3629 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3631 BUG_ON(obj->pin_count == 0);
3632 BUG_ON(obj->gtt_space == NULL);
3634 if (--obj->pin_count == 0)
3635 obj->pin_mappable = false;
3639 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3640 struct drm_file *file)
3642 struct drm_i915_gem_pin *args = data;
3643 struct drm_i915_gem_object *obj;
3646 ret = i915_mutex_lock_interruptible(dev);
3650 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3651 if (&obj->base == NULL) {
3656 if (obj->madv != I915_MADV_WILLNEED) {
3657 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3662 if (obj->pin_filp != NULL && obj->pin_filp != file) {
3663 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3669 if (obj->user_pin_count == 0) {
3670 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3675 obj->user_pin_count++;
3676 obj->pin_filp = file;
3678 /* XXX - flush the CPU caches for pinned objects
3679 * as the X server doesn't manage domains yet
3681 i915_gem_object_flush_cpu_write_domain(obj);
3682 args->offset = obj->gtt_offset;
3684 drm_gem_object_unreference(&obj->base);
3686 mutex_unlock(&dev->struct_mutex);
3691 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3692 struct drm_file *file)
3694 struct drm_i915_gem_pin *args = data;
3695 struct drm_i915_gem_object *obj;
3698 ret = i915_mutex_lock_interruptible(dev);
3702 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3703 if (&obj->base == NULL) {
3708 if (obj->pin_filp != file) {
3709 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3714 obj->user_pin_count--;
3715 if (obj->user_pin_count == 0) {
3716 obj->pin_filp = NULL;
3717 i915_gem_object_unpin(obj);
3721 drm_gem_object_unreference(&obj->base);
3723 mutex_unlock(&dev->struct_mutex);
3728 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3729 struct drm_file *file)
3731 struct drm_i915_gem_busy *args = data;
3732 struct drm_i915_gem_object *obj;
3735 ret = i915_mutex_lock_interruptible(dev);
3739 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3740 if (&obj->base == NULL) {
3745 /* Count all active objects as busy, even if they are currently not used
3746 * by the gpu. Users of this interface expect objects to eventually
3747 * become non-busy without any further actions, therefore emit any
3748 * necessary flushes here.
3750 ret = i915_gem_object_flush_active(obj);
3752 args->busy = obj->active;
3754 args->busy |= intel_ring_flag(obj->ring) << 16;
3757 drm_gem_object_unreference(&obj->base);
3759 mutex_unlock(&dev->struct_mutex);
3764 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3765 struct drm_file *file_priv)
3767 return i915_gem_ring_throttle(dev, file_priv);
3771 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3772 struct drm_file *file_priv)
3774 struct drm_i915_gem_madvise *args = data;
3775 struct drm_i915_gem_object *obj;
3778 switch (args->madv) {
3779 case I915_MADV_DONTNEED:
3780 case I915_MADV_WILLNEED:
3786 ret = i915_mutex_lock_interruptible(dev);
3790 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3791 if (&obj->base == NULL) {
3796 if (obj->pin_count) {
3801 if (obj->madv != __I915_MADV_PURGED)
3802 obj->madv = args->madv;
3804 /* if the object is no longer attached, discard its backing storage */
3805 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3806 i915_gem_object_truncate(obj);
3808 args->retained = obj->madv != __I915_MADV_PURGED;
3811 drm_gem_object_unreference(&obj->base);
3813 mutex_unlock(&dev->struct_mutex);
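/* Common initialisation shared by all newly created GEM objects. */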
3817 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3818 const struct drm_i915_gem_object_ops *ops)
3820 INIT_LIST_HEAD(&obj->mm_list);
3821 INIT_LIST_HEAD(&obj->global_list);
3822 INIT_LIST_HEAD(&obj->ring_list);
3823 INIT_LIST_HEAD(&obj->exec_list);
3827 obj->fence_reg = I915_FENCE_REG_NONE;
3828 obj->madv = I915_MADV_WILLNEED;
3829 /* Avoid an unnecessary call to unbind on the first bind. */
3830 obj->map_and_fenceable = true;
3832 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3835 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3836 .get_pages = i915_gem_object_get_pages_gtt,
3837 .put_pages = i915_gem_object_put_pages_gtt,
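/* Allocate and initialise a new GEM object of the requested size. */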
3840 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3843 struct drm_i915_gem_object *obj;
3845 struct address_space *mapping;
3849 obj = kmalloc(sizeof(*obj), M_DRM, M_WAITOK | M_ZERO);
3853 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3859 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3860 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3861 /* 965gm cannot relocate objects above 4GiB. */
3862 mask &= ~__GFP_HIGHMEM;
3863 mask |= __GFP_DMA32;
3866 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3867 mapping_set_gfp_mask(mapping, mask);
3870 i915_gem_object_init(obj, &i915_gem_object_ops);
3872 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3873 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3876 /* On some devices, we can have the GPU use the LLC (the CPU
3877 * cache) for about a 10% performance improvement
3878 * compared to uncached. Graphics requests other than
3879 * display scanout are coherent with the CPU in
3880 * accessing this cache. This means in this mode we
3881 * don't need to clflush on the CPU side, and on the
3882 * GPU side we only need to flush internal caches to
3883 * get data visible to the CPU.
3885 * However, we maintain the display planes as UC, and so
3886 * need to rebind when first used as such.
3888 obj->cache_level = I915_CACHE_LLC;
3890 obj->cache_level = I915_CACHE_NONE;
3895 int i915_gem_init_object(struct drm_gem_object *obj)
3902 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3904 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3905 struct drm_device *dev = obj->base.dev;
3906 drm_i915_private_t *dev_priv = dev->dev_private;
3909 i915_gem_detach_phys_object(dev, obj);
3912 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3913 bool was_interruptible;
3915 was_interruptible = dev_priv->mm.interruptible;
3916 dev_priv->mm.interruptible = false;
3918 WARN_ON(i915_gem_object_unbind(obj));
3920 dev_priv->mm.interruptible = was_interruptible;
3923 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
3924 * before progressing. */
3926 i915_gem_object_unpin_pages(obj);
3928 if (WARN_ON(obj->pages_pin_count))
3929 obj->pages_pin_count = 0;
3930 i915_gem_object_put_pages(obj);
3931 drm_gem_free_mmap_offset(&obj->base);
3935 drm_gem_object_release(&obj->base);
3936 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3939 i915_gem_object_free(obj);
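/*
 * Quiesce the GPU for suspend or unload: wait for it to idle, retire any
 * completed requests, evict buffers under UMS, stop the hangcheck timer
 * and tear down the rings.
 */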
3943 i915_gem_idle(struct drm_device *dev)
3945 drm_i915_private_t *dev_priv = dev->dev_private;
3948 mutex_lock(&dev->struct_mutex);
3950 if (dev_priv->mm.suspended) {
3951 mutex_unlock(&dev->struct_mutex);
3955 ret = i915_gpu_idle(dev);
3957 mutex_unlock(&dev->struct_mutex);
3960 i915_gem_retire_requests(dev);
3962 /* Under UMS, be paranoid and evict. */
3963 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3964 i915_gem_evict_everything(dev);
3966 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3967 * We need to replace this with a semaphore, or something.
3968 * And not confound mm.suspended!
3970 dev_priv->mm.suspended = 1;
3971 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
3973 i915_kernel_lost_context(dev);
3974 i915_gem_cleanup_ringbuffer(dev);
3976 mutex_unlock(&dev->struct_mutex);
3978 /* Cancel the retire work handler, which should be idle now. */
3979 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
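/* Re-apply the saved L3 remapping information on hardware with an L3 GPU cache. */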
3984 void i915_gem_l3_remap(struct drm_device *dev)
3986 drm_i915_private_t *dev_priv = dev->dev_private;
3990 if (!HAS_L3_GPU_CACHE(dev))
3993 if (!dev_priv->l3_parity.remap_info)
3996 misccpctl = I915_READ(GEN7_MISCCPCTL);
3997 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3998 POSTING_READ(GEN7_MISCCPCTL);
4000 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4001 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
4002 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
4003 DRM_DEBUG("0x%x was already programmed to %x\n",
4004 GEN7_L3LOG_BASE + i, remap);
4005 if (remap && !dev_priv->l3_parity.remap_info[i/4])
4006 DRM_DEBUG_DRIVER("Clearing remapped register\n");
4007 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
4010 /* Make sure all the writes land before disabling dop clock gating */
4011 POSTING_READ(GEN7_L3LOG_BASE);
4013 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
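/* Enable the per-generation tiling/swizzle configuration in hardware. */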
4016 void i915_gem_init_swizzling(struct drm_device *dev)
4018 drm_i915_private_t *dev_priv = dev->dev_private;
4020 if (INTEL_INFO(dev)->gen < 5 ||
4021 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4024 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4025 DISP_TILE_SURFACE_SWIZZLING);
4030 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4032 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4033 else if (IS_GEN7(dev))
4034 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
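/* Report whether the BLT ring is usable; early gen6 pre-production parts are excluded. */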
4040 intel_enable_blt(struct drm_device *dev)
4047 /* The blitter was dysfunctional on early prototypes */
4048 revision = pci_read_config(dev->dev, PCIR_REVID, 1);
4049 if (IS_GEN6(dev) && revision < 8) {
4050 DRM_INFO("BLT not supported on this pre-production hardware;"
4051 " graphics performance will be degraded.\n");
4058 static int i915_gem_init_rings(struct drm_device *dev)
4060 struct drm_i915_private *dev_priv = dev->dev_private;
4063 ret = intel_init_render_ring_buffer(dev);
4068 ret = intel_init_bsd_ring_buffer(dev);
4070 goto cleanup_render_ring;
4073 if (intel_enable_blt(dev)) {
4074 ret = intel_init_blt_ring_buffer(dev);
4076 goto cleanup_bsd_ring;
4079 if (HAS_VEBOX(dev)) {
4080 ret = intel_init_vebox_ring_buffer(dev);
4082 goto cleanup_blt_ring;
4086 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4088 goto cleanup_vebox_ring;
4093 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4095 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4097 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4098 cleanup_render_ring:
4099 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
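/*
 * Bring the GPU hardware back up: apply chipset workarounds, restore the
 * L3 remap and swizzling state, then initialise the rings, hardware
 * contexts and (if present) the aliasing PPGTT.
 */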
4105 i915_gem_init_hw(struct drm_device *dev)
4107 drm_i915_private_t *dev_priv = dev->dev_private;
4111 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4115 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
4116 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
4118 if (HAS_PCH_NOP(dev)) {
4119 u32 temp = I915_READ(GEN7_MSG_CTL);
4120 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4121 I915_WRITE(GEN7_MSG_CTL, temp);
4124 i915_gem_l3_remap(dev);
4126 i915_gem_init_swizzling(dev);
4128 ret = i915_gem_init_rings(dev);
4133 * XXX: There was some w/a described somewhere suggesting loading
4134 * contexts before PPGTT.
4136 i915_gem_context_init(dev);
4137 if (dev_priv->mm.aliasing_ppgtt) {
4138 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4140 i915_gem_cleanup_aliasing_ppgtt(dev);
4141 DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4148 int i915_gem_init(struct drm_device *dev)
4150 struct drm_i915_private *dev_priv = dev->dev_private;
4153 mutex_lock(&dev->struct_mutex);
4155 if (IS_VALLEYVIEW(dev)) {
4156 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4157 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4158 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4159 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4162 i915_gem_init_global_gtt(dev);
4164 ret = i915_gem_init_hw(dev);
4165 mutex_unlock(&dev->struct_mutex);
4167 i915_gem_cleanup_aliasing_ppgtt(dev);
4171 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4172 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4173 dev_priv->dri1.allow_batchbuffer = 1;
4178 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4180 drm_i915_private_t *dev_priv = dev->dev_private;
4181 struct intel_ring_buffer *ring;
4184 for_each_ring(ring, dev_priv, i)
4185 intel_cleanup_ring_buffer(ring);
4189 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4190 struct drm_file *file_priv)
4192 drm_i915_private_t *dev_priv = dev->dev_private;
4195 if (drm_core_check_feature(dev, DRIVER_MODESET))
4198 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4199 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4200 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4203 mutex_lock(&dev->struct_mutex);
4204 dev_priv->mm.suspended = 0;
4206 ret = i915_gem_init_hw(dev);
4208 mutex_unlock(&dev->struct_mutex);
4212 KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
4213 mutex_unlock(&dev->struct_mutex);
4215 ret = drm_irq_install(dev);
4217 goto cleanup_ringbuffer;
4222 mutex_lock(&dev->struct_mutex);
4223 i915_gem_cleanup_ringbuffer(dev);
4224 dev_priv->mm.suspended = 1;
4225 mutex_unlock(&dev->struct_mutex);
4231 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4232 struct drm_file *file_priv)
4234 if (drm_core_check_feature(dev, DRIVER_MODESET))
4237 drm_irq_uninstall(dev);
4238 return i915_gem_idle(dev);
4242 i915_gem_lastclose(struct drm_device *dev)
4246 if (drm_core_check_feature(dev, DRIVER_MODESET))
4249 ret = i915_gem_idle(dev);
4251 DRM_ERROR("failed to idle hardware: %d\n", ret);
4255 init_ring_lists(struct intel_ring_buffer *ring)
4257 INIT_LIST_HEAD(&ring->active_list);
4258 INIT_LIST_HEAD(&ring->request_list);
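/*
 * One-time GEM initialisation at driver load: set up the various object
 * lists, the retire work handler, the fence register count for this
 * generation and the low-memory shrinker/handler.
 */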
4262 i915_gem_load(struct drm_device *dev)
4265 drm_i915_private_t *dev_priv = dev->dev_private;
4267 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4268 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4269 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4270 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4271 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4272 for (i = 0; i < I915_NUM_RINGS; i++)
4273 init_ring_lists(&dev_priv->ring[i]);
4274 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4275 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4276 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4277 i915_gem_retire_work_handler);
4278 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4280 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4282 I915_WRITE(MI_ARB_STATE,
4283 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4286 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4288 /* Old X drivers will take 0-2 for front, back, depth buffers */
4289 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4290 dev_priv->fence_reg_start = 3;
4292 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4293 dev_priv->num_fence_regs = 32;
4294 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4295 dev_priv->num_fence_regs = 16;
4297 dev_priv->num_fence_regs = 8;
4299 /* Initialize fence registers to zero */
4300 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4301 i915_gem_restore_fences(dev);
4303 i915_gem_detect_bit_6_swizzle(dev);
4304 init_waitqueue_head(&dev_priv->pending_flip_queue);
4306 dev_priv->mm.interruptible = true;
4309 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4310 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4311 register_shrinker(&dev_priv->mm.inactive_shrinker);
4313 dev_priv->mm.inactive_shrinker = EVENTHANDLER_REGISTER(vm_lowmem,
4314 i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
4319 * Create a physically contiguous memory object for this object
4320 * e.g. for cursor + overlay regs
4322 static int i915_gem_init_phys_object(struct drm_device *dev,
4323 int id, int size, int align)
4325 drm_i915_private_t *dev_priv = dev->dev_private;
4326 struct drm_i915_gem_phys_object *phys_obj;
4329 if (dev_priv->mm.phys_objs[id - 1] || !size)
4332 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4338 phys_obj->handle = drm_pci_alloc(dev, size, align);
4339 if (!phys_obj->handle) {
4343 pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
4344 size / PAGE_SIZE, PAT_WRITE_COMBINING);
4346 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4355 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4357 drm_i915_private_t *dev_priv = dev->dev_private;
4358 struct drm_i915_gem_phys_object *phys_obj;
4360 if (!dev_priv->mm.phys_objs[id - 1])
4363 phys_obj = dev_priv->mm.phys_objs[id - 1];
4364 if (phys_obj->cur_obj) {
4365 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4368 drm_pci_free(dev, phys_obj->handle);
4370 dev_priv->mm.phys_objs[id - 1] = NULL;
4373 void i915_gem_free_all_phys_object(struct drm_device *dev)
4377 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4378 i915_gem_free_phys_object(dev, i);
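/*
 * Copy the contents of a physically contiguous object back into its
 * backing pages and detach it from the phys object.
 */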
4381 void i915_gem_detach_phys_object(struct drm_device *dev,
4382 struct drm_i915_gem_object *obj)
4384 struct vm_object *mapping = obj->base.vm_obj;
4391 vaddr = obj->phys_obj->handle->vaddr;
4393 page_count = obj->base.size / PAGE_SIZE;
4394 VM_OBJECT_LOCK(obj->base.vm_obj);
4395 for (i = 0; i < page_count; i++) {
4396 struct vm_page *page = shmem_read_mapping_page(mapping, i);
4397 if (!IS_ERR(page)) {
4398 VM_OBJECT_UNLOCK(obj->base.vm_obj);
4399 char *dst = kmap_atomic(page);
4400 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4403 drm_clflush_pages(&page, 1);
4406 set_page_dirty(page);
4407 mark_page_accessed(page);
4408 page_cache_release(page);
4410 VM_OBJECT_LOCK(obj->base.vm_obj);
4411 vm_page_reference(page);
4412 vm_page_dirty(page);
4413 vm_page_busy_wait(page, FALSE, "i915gem");
4414 vm_page_unwire(page, 0);
4415 vm_page_wakeup(page);
4418 VM_OBJECT_UNLOCK(obj->base.vm_obj);
4419 intel_gtt_chipset_flush();
4421 obj->phys_obj->cur_obj = NULL;
4422 obj->phys_obj = NULL;
4426 i915_gem_attach_phys_object(struct drm_device *dev,
4427 struct drm_i915_gem_object *obj,
4431 struct vm_object *mapping = obj->base.vm_obj;
4432 drm_i915_private_t *dev_priv = dev->dev_private;
4437 if (id > I915_MAX_PHYS_OBJECT)
4440 if (obj->phys_obj) {
4441 if (obj->phys_obj->id == id)
4443 i915_gem_detach_phys_object(dev, obj);
4446 /* create a new object */
4447 if (!dev_priv->mm.phys_objs[id - 1]) {
4448 ret = i915_gem_init_phys_object(dev, id,
4449 obj->base.size, align);
4451 DRM_ERROR("failed to init phys object %d size: %zu\n",
4452 id, obj->base.size);
4457 /* bind to the object */
4458 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4459 obj->phys_obj->cur_obj = obj;
4461 page_count = obj->base.size / PAGE_SIZE;
4463 VM_OBJECT_LOCK(obj->base.vm_obj);
4464 for (i = 0; i < page_count; i++) {
4465 struct vm_page *page;
4468 page = shmem_read_mapping_page(mapping, i);
4469 VM_OBJECT_UNLOCK(obj->base.vm_obj);
4471 return PTR_ERR(page);
4473 src = kmap_atomic(page);
4474 dst = (char*)obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4475 memcpy(dst, src, PAGE_SIZE);
4479 mark_page_accessed(page);
4480 page_cache_release(page);
4482 VM_OBJECT_LOCK(obj->base.vm_obj);
4483 vm_page_reference(page);
4484 vm_page_busy_wait(page, FALSE, "i915gem");
4485 vm_page_unwire(page, 0);
4486 vm_page_wakeup(page);
4488 VM_OBJECT_UNLOCK(obj->base.vm_obj);
4494 i915_gem_phys_pwrite(struct drm_device *dev,
4495 struct drm_i915_gem_object *obj,
4496 struct drm_i915_gem_pwrite *args,
4497 struct drm_file *file_priv)
4499 void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
4500 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4502 if (copyin_nofault(user_data, vaddr, args->size) != 0) {
4503 unsigned long unwritten;
4505 /* The physical object once assigned is fixed for the lifetime
4506 * of the obj, so we can safely drop the lock and continue
4509 mutex_unlock(&dev->struct_mutex);
4510 unwritten = copy_from_user(vaddr, user_data, args->size);
4511 mutex_lock(&dev->struct_mutex);
4516 i915_gem_chipset_flush(dev);
4520 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4522 struct drm_i915_file_private *file_priv = file->driver_priv;
4524 /* Clean up our request list when the client is going away, so that
4525 * later retire_requests won't dereference our soon-to-be-gone
4528 spin_lock(&file_priv->mm.lock);
4529 while (!list_empty(&file_priv->mm.request_list)) {
4530 struct drm_i915_gem_request *request;
4532 request = list_first_entry(&file_priv->mm.request_list,
4533 struct drm_i915_gem_request,
4535 list_del(&request->client_list);
4536 request->file_priv = NULL;
4538 spin_unlock(&file_priv->mm.lock);
4542 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
4543 vm_ooffset_t foff, struct ucred *cred, u_short *color)
4546 *color = 0; /* XXXKIB */
4551 i915_gem_pager_dtor(void *handle)
4553 struct drm_gem_object *obj;
4554 struct drm_device *dev;
4559 mutex_lock(&dev->struct_mutex);
4560 drm_gem_free_mmap_offset(obj);
4561 i915_gem_release_mmap(to_intel_bo(obj));
4562 drm_gem_object_unreference(obj);
4563 mutex_unlock(&dev->struct_mutex);
4566 #define GEM_PARANOID_CHECK_GTT 0
4567 #if GEM_PARANOID_CHECK_GTT
4569 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
4572 struct drm_i915_private *dev_priv;
4574 unsigned long start, end;
4578 dev_priv = dev->dev_private;
4579 start = OFF_TO_IDX(dev_priv->mm.gtt_start);
4580 end = OFF_TO_IDX(dev_priv->mm.gtt_end);
4581 for (i = start; i < end; i++) {
4582 pa = intel_gtt_read_pte_paddr(i);
4583 for (j = 0; j < page_count; j++) {
4584 if (pa == VM_PAGE_TO_PHYS(ma[j])) {
4585 panic("Page %p in GTT pte index %d pte %x",
4586 ma[i], i, intel_gtt_read_pte(i));
4590 obj->fence_dirty = false;
4595 i915_gpu_is_active(struct drm_device *dev)
4597 drm_i915_private_t *dev_priv = dev->dev_private;
4599 return !list_empty(&dev_priv->mm.active_list);
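/*
 * Low-memory callback: retire completed requests, unbind purgeable and
 * then inactive buffers, and as a last resort wait for the GPU to idle
 * so more buffers can be reclaimed.
 */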
4603 i915_gem_lowmem(void *arg)
4605 struct drm_device *dev;
4606 struct drm_i915_private *dev_priv;
4607 struct drm_i915_gem_object *obj, *next;
4608 int cnt, cnt_fail, cnt_total;
4611 dev_priv = dev->dev_private;
4613 if (lockmgr(&dev->struct_mutex, LK_EXCLUSIVE|LK_NOWAIT))
4617 /* first scan for clean buffers */
4618 i915_gem_retire_requests(dev);
4620 cnt_total = cnt_fail = cnt = 0;
4622 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4624 if (i915_gem_object_is_purgeable(obj)) {
4625 if (i915_gem_object_unbind(obj) != 0)
4631 /* second pass, evict/count anything still on the inactive list */
4632 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4634 if (i915_gem_object_unbind(obj) == 0)
4640 if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) {
4642 * We are desperate for pages, so as a last resort, wait
4643 * for the GPU to finish and discard whatever we can.
4644 * This has a dramatic impact to reduce the number of
4645 * OOM-killer events whilst running the GPU aggressively.
4647 if (i915_gpu_idle(dev) == 0)
4650 mutex_unlock(&dev->struct_mutex);