2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
26 * Copyright (c) 2011 The FreeBSD Foundation
27 * All rights reserved.
29 * This software was developed by Konstantin Belousov under sponsorship from
30 * the FreeBSD Foundation.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 #include <sys/resourcevar.h>
56 #include <sys/sfbuf.h>
59 #include <drm/i915_drm.h>
61 #include "intel_drv.h"
62 #include <linux/shmem_fs.h>
63 #include <linux/completion.h>
64 #include <linux/highmem.h>
65 #include <linux/jiffies.h>
66 #include <linux/time.h>
68 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
69 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
70 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
72 bool map_and_fenceable,
74 static int i915_gem_phys_pwrite(struct drm_device *dev,
75 struct drm_i915_gem_object *obj,
76 struct drm_i915_gem_pwrite *args,
77 struct drm_file *file);
79 static void i915_gem_write_fence(struct drm_device *dev, int reg,
80 struct drm_i915_gem_object *obj);
81 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
82 struct drm_i915_fence_reg *fence,
85 static uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size,
87 static uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev,
88 uint32_t size, int tiling_mode);
89 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj);
90 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
92 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
95 i915_gem_release_mmap(obj);
97 /* As we do not have an associated fence register, we will force
98 * a tiling change if we ever need to acquire one.
100 obj->fence_dirty = false;
101 obj->fence_reg = I915_FENCE_REG_NONE;
104 static int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj);
105 static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj);
106 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj);
107 static void i915_gem_reset_fences(struct drm_device *dev);
108 static void i915_gem_lowmem(void *arg);
110 /* some bookkeeping */
111 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
114 dev_priv->mm.object_count++;
115 dev_priv->mm.object_memory += size;
118 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
121 dev_priv->mm.object_count--;
122 dev_priv->mm.object_memory -= size;
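/*
 * Block until any pending GPU reset has been handled.  If the device is
 * wedged we wait (up to 10 seconds) on the error completion; if it is
 * still wedged afterwards we re-increment the completion count so the
 * token consumed here does not leave later waiters stuck at zero.
 */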
126 i915_gem_wait_for_error(struct drm_device *dev)
128 struct drm_i915_private *dev_priv = dev->dev_private;
129 struct completion *x = &dev_priv->error_completion;
132 if (!atomic_read(&dev_priv->mm.wedged))
136 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
137 * userspace. If it takes that long something really bad is going on and
138 * we should simply try to bail out and fail as gracefully as possible.
140 ret = wait_for_completion_interruptible_timeout(x, 10*hz);
142 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
144 } else if (ret < 0) {
148 if (atomic_read(&dev_priv->mm.wedged)) {
149 /* GPU is hung, bump the completion count to account for
150 * the token we just consumed so that we never hit zero and
151 * end up waiting upon a subsequent completion event that
154 lockmgr(&x->wait.lock, LK_EXCLUSIVE);
156 lockmgr(&x->wait.lock, LK_RELEASE);
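/*
 * Take the device struct lock interruptibly, first giving any pending GPU
 * error handling a chance to complete so callers do not block on a wedged GPU.
 */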
161 int i915_mutex_lock_interruptible(struct drm_device *dev)
165 ret = i915_gem_wait_for_error(dev);
169 ret = lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL);
173 WARN_ON(i915_verify_lists(dev));
178 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
184 i915_gem_init_ioctl(struct drm_device *dev, void *data,
185 struct drm_file *file)
187 struct drm_i915_gem_init *args = data;
189 if (drm_core_check_feature(dev, DRIVER_MODESET))
192 if (args->gtt_start >= args->gtt_end ||
193 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
196 /* GEM with user mode setting was never supported on ilk and later. */
197 if (INTEL_INFO(dev)->gen >= 5)
200 lockmgr(&dev->dev_lock, LK_EXCLUSIVE|LK_RETRY|LK_CANRECURSE);
201 i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
202 lockmgr(&dev->dev_lock, LK_RELEASE);
208 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
209 struct drm_file *file)
211 struct drm_i915_private *dev_priv = dev->dev_private;
212 struct drm_i915_gem_get_aperture *args = data;
213 struct drm_i915_gem_object *obj;
218 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
220 pinned += obj->gtt_space->size;
223 args->aper_size = dev_priv->mm.gtt_total;
224 args->aper_available_size = args->aper_size - pinned;
230 i915_gem_create(struct drm_file *file,
231 struct drm_device *dev,
235 struct drm_i915_gem_object *obj;
239 size = roundup(size, PAGE_SIZE);
243 /* Allocate the new object */
244 obj = i915_gem_alloc_object(dev, size);
249 ret = drm_gem_handle_create(file, &obj->base, &handle);
251 drm_gem_object_release(&obj->base);
252 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
253 drm_free(obj, M_DRM);
257 /* drop reference from allocate - handle holds it now */
258 drm_gem_object_unreference(&obj->base);
264 i915_gem_dumb_create(struct drm_file *file,
265 struct drm_device *dev,
266 struct drm_mode_create_dumb *args)
269 /* have to work out size/pitch and return them */
270 args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
271 args->size = args->pitch * args->height;
272 return i915_gem_create(file, dev,
273 args->size, &args->handle);
276 int i915_gem_dumb_destroy(struct drm_file *file,
277 struct drm_device *dev,
281 return drm_gem_handle_delete(file, handle);
285 * Creates a new mm object and returns a handle to it.
288 i915_gem_create_ioctl(struct drm_device *dev, void *data,
289 struct drm_file *file)
291 struct drm_i915_gem_create *args = data;
293 return i915_gem_create(file, dev,
294 args->size, &args->handle);
297 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
299 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
301 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
302 obj->tiling_mode != I915_TILING_NONE;
305 static inline void vm_page_reference(vm_page_t m)
307 vm_page_flag_set(m, PG_REFERENCED);
311 i915_gem_shmem_pread(struct drm_device *dev,
312 struct drm_i915_gem_object *obj,
313 struct drm_i915_gem_pread *args,
314 struct drm_file *file)
321 int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
323 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
326 vm_obj = obj->base.vm_obj;
329 VM_OBJECT_LOCK(vm_obj);
330 vm_object_pip_add(vm_obj, 1);
331 while (args->size > 0) {
332 obj_pi = OFF_TO_IDX(args->offset);
333 obj_po = args->offset & PAGE_MASK;
335 m = shmem_read_mapping_page(vm_obj, obj_pi);
336 VM_OBJECT_UNLOCK(vm_obj);
338 sf = sf_buf_alloc(m);
339 mkva = sf_buf_kva(sf);
340 length = min(args->size, PAGE_SIZE - obj_po);
342 if (do_bit17_swizzling &&
343 (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
344 cnt = roundup2(obj_po + 1, 64);
345 cnt = min(cnt - obj_po, length);
346 swizzled_po = obj_po ^ 64;
349 swizzled_po = obj_po;
351 ret = -copyout_nofault(
352 (char *)mkva + swizzled_po,
353 (void *)(uintptr_t)args->data_ptr, cnt);
356 args->data_ptr += cnt;
363 VM_OBJECT_LOCK(vm_obj);
364 vm_page_reference(m);
365 vm_page_busy_wait(m, FALSE, "i915gem");
366 vm_page_unwire(m, 1);
372 vm_object_pip_wakeup(vm_obj);
373 VM_OBJECT_UNLOCK(vm_obj);
379 * Reads data from the object referenced by handle.
381 * On error, the contents of *data are undefined.
384 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
385 struct drm_file *file)
387 struct drm_i915_gem_pread *args = data;
388 struct drm_i915_gem_object *obj;
394 ret = i915_mutex_lock_interruptible(dev);
398 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
399 if (&obj->base == NULL) {
404 /* Bounds check source. */
405 if (args->offset > obj->base.size ||
406 args->size > obj->base.size - args->offset) {
411 ret = i915_gem_shmem_pread(dev, obj, args, file);
413 drm_gem_object_unreference(&obj->base);
420 i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj,
421 uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file)
427 * Pass the unaligned physical address and size to pmap_mapdev_attr()
428 * so it can properly calculate whether an extra page needs to be
429 * mapped or not to cover the requested range. The function will
430 * add the page offset into the returned mkva for us.
432 mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base + obj->gtt_offset +
433 offset, size, PAT_WRITE_COMBINING);
434 ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva, size);
435 pmap_unmapdev(mkva, size);
440 i915_gem_shmem_pwrite(struct drm_device *dev,
441 struct drm_i915_gem_object *obj,
442 struct drm_i915_gem_pwrite *args,
443 struct drm_file *file)
450 int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
452 do_bit17_swizzling = 0;
455 vm_obj = obj->base.vm_obj;
458 VM_OBJECT_LOCK(vm_obj);
459 vm_object_pip_add(vm_obj, 1);
460 while (args->size > 0) {
461 obj_pi = OFF_TO_IDX(args->offset);
462 obj_po = args->offset & PAGE_MASK;
464 m = shmem_read_mapping_page(vm_obj, obj_pi);
465 VM_OBJECT_UNLOCK(vm_obj);
467 sf = sf_buf_alloc(m);
468 mkva = sf_buf_kva(sf);
469 length = min(args->size, PAGE_SIZE - obj_po);
471 if (do_bit17_swizzling &&
472 (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
473 cnt = roundup2(obj_po + 1, 64);
474 cnt = min(cnt - obj_po, length);
475 swizzled_po = obj_po ^ 64;
478 swizzled_po = obj_po;
480 ret = -copyin_nofault(
481 (void *)(uintptr_t)args->data_ptr,
482 (char *)mkva + swizzled_po, cnt);
485 args->data_ptr += cnt;
492 VM_OBJECT_LOCK(vm_obj);
494 vm_page_reference(m);
495 vm_page_busy_wait(m, FALSE, "i915gem");
496 vm_page_unwire(m, 1);
502 vm_object_pip_wakeup(vm_obj);
503 VM_OBJECT_UNLOCK(vm_obj);
509 * Writes data to the object referenced by handle.
511 * On error, the contents of the buffer that were to be modified are undefined.
514 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
515 struct drm_file *file)
517 struct drm_i915_gem_pwrite *args = data;
518 struct drm_i915_gem_object *obj;
520 vm_offset_t start, end;
526 start = trunc_page(args->data_ptr);
527 end = round_page(args->data_ptr + args->size);
528 npages = howmany(end - start, PAGE_SIZE);
529 ma = kmalloc(npages * sizeof(vm_page_t), M_DRM, M_WAITOK |
531 npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
532 (vm_offset_t)args->data_ptr, args->size,
533 VM_PROT_READ, ma, npages);
539 ret = i915_mutex_lock_interruptible(dev);
543 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
544 if (&obj->base == NULL) {
549 /* Bounds check destination. */
550 if (args->offset > obj->base.size ||
551 args->size > obj->base.size - args->offset) {
557 ret = i915_gem_phys_pwrite(dev, obj, args, file);
558 } else if (obj->gtt_space &&
559 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
560 ret = i915_gem_object_pin(obj, 0, true, false);
563 ret = i915_gem_object_set_to_gtt_domain(obj, true);
566 ret = i915_gem_object_put_fence(obj);
569 ret = i915_gem_gtt_write(dev, obj, args->data_ptr, args->size,
572 i915_gem_object_unpin(obj);
574 ret = i915_gem_object_set_to_cpu_domain(obj, true);
577 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
580 drm_gem_object_unreference(&obj->base);
584 vm_page_unhold_pages(ma, npages);
591 i915_gem_check_wedge(struct drm_i915_private *dev_priv,
594 if (atomic_read(&dev_priv->mm.wedged)) {
595 struct completion *x = &dev_priv->error_completion;
596 bool recovery_complete;
598 /* Give the error handler a chance to run. */
599 lockmgr(&x->wait.lock, LK_EXCLUSIVE);
600 recovery_complete = x->done > 0;
601 lockmgr(&x->wait.lock, LK_RELEASE);
603 /* Non-interruptible callers can't handle -EAGAIN, hence return
604 * -EIO unconditionally for these. */
608 /* Recovery complete, but still wedged means reset failure. */
609 if (recovery_complete)
619 * Compare seqno against outstanding lazy request. Emit a request if they are
623 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
627 DRM_LOCK_ASSERT(ring->dev);
630 if (seqno == ring->outstanding_lazy_request)
631 ret = i915_add_request(ring, NULL, NULL);
637 * __wait_seqno - wait until execution of seqno has finished
638 * @ring: the ring expected to report seqno
640 * @interruptible: do an interruptible wait (normally yes)
641 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
643 * Returns 0 if the seqno was found within the allotted time. Otherwise returns the
644 * errno with the remaining time filled in the timeout argument.
646 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
647 bool interruptible, struct timespec *timeout)
649 drm_i915_private_t *dev_priv = ring->dev->dev_private;
650 struct timespec before, now, wait_time={1,0};
651 unsigned long timeout_jiffies;
653 bool wait_forever = true;
656 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
659 if (timeout != NULL) {
660 wait_time = *timeout;
661 wait_forever = false;
664 timeout_jiffies = timespec_to_jiffies(&wait_time);
666 if (WARN_ON(!ring->irq_get(ring)))
669 /* Record current time in case interrupted by signal, or wedged */
670 getrawmonotonic(&before);
673 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
674 atomic_read(&dev_priv->mm.wedged))
677 end = wait_event_interruptible_timeout(ring->irq_queue,
681 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
684 ret = i915_gem_check_wedge(dev_priv, interruptible);
687 } while (end == 0 && wait_forever);
689 getrawmonotonic(&now);
695 struct timespec sleep_time = timespec_sub(now, before);
696 *timeout = timespec_sub(*timeout, sleep_time);
701 case -EAGAIN: /* Wedged */
702 case -ERESTARTSYS: /* Signal */
704 case 0: /* Timeout */
706 set_normalized_timespec(timeout, 0, 0);
707 return -ETIMEDOUT; /* -ETIME on Linux */
708 default: /* Completed */
709 WARN_ON(end < 0); /* We're not aware of other errors */
715 * Waits for a sequence number to be signaled, and cleans up the
716 * request and object lists appropriately for that event.
719 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
721 struct drm_device *dev = ring->dev;
722 struct drm_i915_private *dev_priv = dev->dev_private;
725 DRM_LOCK_ASSERT(dev);
728 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
732 ret = i915_gem_check_olr(ring, seqno);
736 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
742 * Ensures that all rendering to the object has completed and the object is
743 * safe to unbind from the GTT or access from the CPU.
745 static __must_check int
746 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
749 struct intel_ring_buffer *ring = obj->ring;
753 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
757 ret = i915_wait_seqno(ring, seqno);
761 i915_gem_retire_requests_ring(ring);
763 /* Manually manage the write flush as we may have not yet
764 * retired the buffer.
766 if (obj->last_write_seqno &&
767 i915_seqno_passed(seqno, obj->last_write_seqno)) {
768 obj->last_write_seqno = 0;
769 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
775 /* A nonblocking variant of the above wait. This is a highly dangerous routine
776 * as the object state may change during this call.
778 static __must_check int
779 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
782 struct drm_device *dev = obj->base.dev;
783 struct drm_i915_private *dev_priv = dev->dev_private;
784 struct intel_ring_buffer *ring = obj->ring;
788 DRM_LOCK_ASSERT(dev);
789 BUG_ON(!dev_priv->mm.interruptible);
791 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
795 ret = i915_gem_check_wedge(dev_priv, true);
799 ret = i915_gem_check_olr(ring, seqno);
804 ret = __wait_seqno(ring, seqno, true, NULL);
807 i915_gem_retire_requests_ring(ring);
809 /* Manually manage the write flush as we may have not yet
810 * retired the buffer.
812 if (obj->last_write_seqno &&
813 i915_seqno_passed(seqno, obj->last_write_seqno)) {
814 obj->last_write_seqno = 0;
815 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
822 * Called when user space prepares to use an object with the CPU, either
823 * through the mmap ioctl's mapping or a GTT mapping.
826 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
827 struct drm_file *file)
829 struct drm_i915_gem_set_domain *args = data;
830 struct drm_i915_gem_object *obj;
831 uint32_t read_domains = args->read_domains;
832 uint32_t write_domain = args->write_domain;
835 /* Only handle setting domains to types used by the CPU. */
836 if (write_domain & I915_GEM_GPU_DOMAINS)
839 if (read_domains & I915_GEM_GPU_DOMAINS)
842 /* Having something in the write domain implies it's in the read
843 * domain, and only that read domain. Enforce that in the request.
845 if (write_domain != 0 && read_domains != write_domain)
848 ret = i915_mutex_lock_interruptible(dev);
852 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
853 if (&obj->base == NULL) {
858 /* Try to flush the object off the GPU without holding the lock.
859 * We will repeat the flush holding the lock in the normal manner
860 * to catch cases where we are gazumped.
862 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
866 if (read_domains & I915_GEM_DOMAIN_GTT) {
867 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
869 /* Silently promote "you're not bound, there was nothing to do"
870 * to success, since the client was just asking us to
871 * make sure everything was done.
876 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
880 drm_gem_object_unreference(&obj->base);
887 * Called when user space has done writes to this buffer
890 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
891 struct drm_file *file)
893 struct drm_i915_gem_sw_finish *args = data;
894 struct drm_i915_gem_object *obj;
897 ret = i915_mutex_lock_interruptible(dev);
900 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
901 if (&obj->base == NULL) {
906 /* Pinned buffers may be scanout, so flush the cache */
908 i915_gem_object_flush_cpu_write_domain(obj);
910 drm_gem_object_unreference(&obj->base);
917 * Maps the contents of an object, returning the address it is mapped
920 * While the mapping holds a reference on the contents of the object, it doesn't
921 * imply a ref on the object itself.
924 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
925 struct drm_file *file)
927 struct drm_i915_gem_mmap *args = data;
928 struct drm_gem_object *obj;
929 struct proc *p = curproc;
930 vm_map_t map = &p->p_vmspace->vm_map;
935 obj = drm_gem_object_lookup(dev, file, args->handle);
942 size = round_page(args->size);
943 if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
949 vm_object_hold(obj->vm_obj);
950 vm_object_reference_locked(obj->vm_obj);
951 vm_object_drop(obj->vm_obj);
952 rv = vm_map_find(map, obj->vm_obj, NULL,
953 args->offset, &addr, args->size,
954 PAGE_SIZE, /* align */
956 VM_MAPTYPE_NORMAL, /* maptype */
957 VM_PROT_READ | VM_PROT_WRITE, /* prot */
958 VM_PROT_READ | VM_PROT_WRITE, /* max */
959 MAP_SHARED /* cow */);
960 if (rv != KERN_SUCCESS) {
961 vm_object_deallocate(obj->vm_obj);
962 error = -vm_mmap_to_errno(rv);
964 args->addr_ptr = (uint64_t)addr;
967 drm_gem_object_unreference(obj);
972 * i915_gem_release_mmap - remove physical page mappings
973 * @obj: obj in question
975 * Preserve the reservation of the mmapping with the DRM core code, but
976 * relinquish ownership of the pages back to the system.
978 * It is vital that we remove the page mapping if we have mapped a tiled
979 * object through the GTT and then lose the fence register due to
980 * resource pressure. Similarly if the object has been moved out of the
981 * aperture, then pages mapped into userspace must be revoked. Removing the
982 * mapping will then trigger a page fault on the next user access, allowing
983 * fixup by i915_gem_fault().
986 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
992 if (!obj->fault_mappable)
995 devobj = cdev_pager_lookup(obj);
996 if (devobj != NULL) {
997 page_count = OFF_TO_IDX(obj->base.size);
999 VM_OBJECT_LOCK(devobj);
1000 for (i = 0; i < page_count; i++) {
1001 m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm");
1004 cdev_pager_free_page(devobj, m);
1006 VM_OBJECT_UNLOCK(devobj);
1007 vm_object_deallocate(devobj);
1010 obj->fault_mappable = false;
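/*
 * Compute the size the GTT mapping must cover for a fenced object.  Gen4+
 * and untiled objects need no padding; older chips require a power-of-two
 * fence region (1MB granularity on gen3, 512KB on earlier parts).
 */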
1014 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1018 if (INTEL_INFO(dev)->gen >= 4 ||
1019 tiling_mode == I915_TILING_NONE)
1022 /* Previous chips need a power-of-two fence region when tiling */
1023 if (INTEL_INFO(dev)->gen == 3)
1024 gtt_size = 1024*1024;
1026 gtt_size = 512*1024;
1028 while (gtt_size < size)
1035 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1036 * @obj: object to check
1038 * Return the required GTT alignment for an object, taking into account
1039 * potential fence register mapping.
1042 i915_gem_get_gtt_alignment(struct drm_device *dev,
1048 * Minimum alignment is 4k (GTT page size), but might be greater
1049 * if a fence register is needed for the object.
1051 if (INTEL_INFO(dev)->gen >= 4 ||
1052 tiling_mode == I915_TILING_NONE)
1056 * Previous chips need to be aligned to the size of the smallest
1057 * fence register that can contain the object.
1059 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1063 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1066 * @size: size of the object
1067 * @tiling_mode: tiling mode of the object
1069 * Return the required GTT alignment for an object, only taking into account
1070 * unfenced tiled surface requirements.
1073 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1078 * Minimum alignment is 4k (GTT page size) for sane hw.
1080 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1081 tiling_mode == I915_TILING_NONE)
1084 /* Previous hardware however needs to be aligned to a power-of-two
1085 * tile height. The simplest method for determining this is to reuse
1086 * the power-of-two object size.
1088 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1092 i915_gem_mmap_gtt(struct drm_file *file,
1093 struct drm_device *dev,
1097 struct drm_i915_private *dev_priv = dev->dev_private;
1098 struct drm_i915_gem_object *obj;
1101 ret = i915_mutex_lock_interruptible(dev);
1105 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1106 if (&obj->base == NULL) {
1111 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1116 if (obj->madv != I915_MADV_WILLNEED) {
1117 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1122 ret = drm_gem_create_mmap_offset(&obj->base);
1126 *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1127 DRM_GEM_MAPPING_KEY;
1129 drm_gem_object_unreference(&obj->base);
1136 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1138 * @data: GTT mapping ioctl data
1139 * @file: GEM object info
1141 * Simply returns the fake offset to userspace so it can mmap it.
1142 * The mmap call will end up in drm_gem_mmap(), which will set things
1143 * up so we can get faults in the handler above.
1145 * The fault handler will take care of binding the object into the GTT
1146 * (since it may have been evicted to make room for something), allocating
1147 * a fence register, and mapping the appropriate aperture address into
1151 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1152 struct drm_file *file)
1154 struct drm_i915_gem_mmap_gtt *args = data;
1156 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1159 /* Immediately discard the backing storage */
1161 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1165 vm_obj = obj->base.vm_obj;
1166 VM_OBJECT_LOCK(vm_obj);
1167 vm_object_page_remove(vm_obj, 0, 0, false);
1168 VM_OBJECT_UNLOCK(vm_obj);
1169 obj->madv = __I915_MADV_PURGED;
1173 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1175 return obj->madv == I915_MADV_DONTNEED;
1179 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1184 BUG_ON(obj->madv == __I915_MADV_PURGED);
1186 if (obj->tiling_mode != I915_TILING_NONE)
1187 i915_gem_object_save_bit_17_swizzle(obj);
1188 if (obj->madv == I915_MADV_DONTNEED)
1190 page_count = obj->base.size / PAGE_SIZE;
1191 VM_OBJECT_LOCK(obj->base.vm_obj);
1192 #if GEM_PARANOID_CHECK_GTT
1193 i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
1195 for (i = 0; i < page_count; i++) {
1199 if (obj->madv == I915_MADV_WILLNEED)
1200 vm_page_reference(m);
1201 vm_page_busy_wait(obj->pages[i], FALSE, "i915gem");
1202 vm_page_unwire(obj->pages[i], 1);
1203 vm_page_wakeup(obj->pages[i]);
1205 VM_OBJECT_UNLOCK(obj->base.vm_obj);
1207 drm_free(obj->pages, M_DRM);
1212 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1214 struct drm_device *dev;
1216 int page_count, i, j;
1217 struct vm_page *page;
1219 dev = obj->base.dev;
1220 KASSERT(obj->pages == NULL, ("Obj already has pages"));
1221 page_count = obj->base.size / PAGE_SIZE;
1222 obj->pages = kmalloc(page_count * sizeof(vm_page_t), M_DRM,
1225 vm_obj = obj->base.vm_obj;
1226 VM_OBJECT_LOCK(vm_obj);
1228 for (i = 0; i < page_count; i++) {
1229 page = shmem_read_mapping_page(vm_obj, i);
1233 obj->pages[i] = page;
1236 VM_OBJECT_UNLOCK(vm_obj);
1237 if (i915_gem_object_needs_bit17_swizzle(obj))
1238 i915_gem_object_do_bit_17_swizzle(obj);
1243 for (j = 0; j < i; j++) {
1244 page = obj->pages[j];
1245 vm_page_busy_wait(page, FALSE, "i915gem");
1246 vm_page_unwire(page, 0);
1247 vm_page_wakeup(page);
1249 VM_OBJECT_UNLOCK(vm_obj);
1250 drm_free(obj->pages, M_DRM);
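/*
 * Mark the object active on the given ring: take a reference on first
 * activation, move it to the tails of the global and per-ring active lists,
 * and record the seqno that must retire before it becomes idle again.
 */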
1256 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1257 struct intel_ring_buffer *ring)
1259 struct drm_device *dev = obj->base.dev;
1260 struct drm_i915_private *dev_priv = dev->dev_private;
1261 u32 seqno = intel_ring_get_seqno(ring);
1263 BUG_ON(ring == NULL);
1266 /* Add a reference if we're newly entering the active list. */
1268 drm_gem_object_reference(&obj->base);
1272 /* Move from whatever list we were on to the tail of execution. */
1273 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1274 list_move_tail(&obj->ring_list, &ring->active_list);
1276 obj->last_read_seqno = seqno;
1278 if (obj->fenced_gpu_access) {
1279 obj->last_fenced_seqno = seqno;
1281 /* Bump MRU to take account of the delayed flush */
1282 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1283 struct drm_i915_fence_reg *reg;
1285 reg = &dev_priv->fence_regs[obj->fence_reg];
1286 list_move_tail(&reg->lru_list,
1287 &dev_priv->mm.fence_list);
1293 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1295 struct drm_device *dev = obj->base.dev;
1296 struct drm_i915_private *dev_priv = dev->dev_private;
1298 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1299 BUG_ON(!obj->active);
1301 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1303 list_del_init(&obj->ring_list);
1306 obj->last_read_seqno = 0;
1307 obj->last_write_seqno = 0;
1308 obj->base.write_domain = 0;
1310 obj->last_fenced_seqno = 0;
1311 obj->fenced_gpu_access = false;
1314 drm_gem_object_unreference(&obj->base);
1316 WARN_ON(i915_verify_lists(dev));
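/*
 * Called before the 32-bit seqno counter wraps: idle the GPU and retire all
 * outstanding requests so the per-ring semaphore sync_seqno bookkeeping can
 * be cleared back to zero.
 */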
1320 i915_gem_handle_seqno_wrap(struct drm_device *dev)
1322 struct drm_i915_private *dev_priv = dev->dev_private;
1323 struct intel_ring_buffer *ring;
1326 /* The hardware uses various monotonic 32-bit counters, if we
1327 * detect that they will wraparound we need to idle the GPU
1328 * and reset those counters.
1331 for_each_ring(ring, dev_priv, i) {
1332 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1333 ret |= ring->sync_seqno[j] != 0;
1338 ret = i915_gpu_idle(dev);
1342 i915_gem_retire_requests(dev);
1343 for_each_ring(ring, dev_priv, i) {
1344 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1345 ring->sync_seqno[j] = 0;
1352 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1354 struct drm_i915_private *dev_priv = dev->dev_private;
1356 /* reserve 0 for non-seqno */
1357 if (dev_priv->next_seqno == 0) {
1358 int ret = i915_gem_handle_seqno_wrap(dev);
1362 dev_priv->next_seqno = 1;
1365 *seqno = dev_priv->next_seqno++;
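/*
 * Emit a request on the ring.  Any outstanding flushes are emitted first,
 * the ring position and seqno of the request are recorded for retirement
 * and hang checking, and the retire work and hangcheck timer are kicked if
 * the device is not suspended.
 */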
1370 i915_add_request(struct intel_ring_buffer *ring,
1371 struct drm_file *file,
1374 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1375 struct drm_i915_gem_request *request;
1376 u32 request_ring_position;
1381 * Emit any outstanding flushes - execbuf can fail to emit the flush
1382 * after having emitted the batchbuffer command. Hence we need to fix
1383 * things up similar to emitting the lazy request. The difference here
1384 * is that the flush _must_ happen before the next request, no matter
1387 ret = intel_ring_flush_all_caches(ring);
1391 request = kmalloc(sizeof(*request), M_DRM, M_WAITOK | M_ZERO);
1392 if (request == NULL)
1396 /* Record the position of the start of the request so that
1397 * should we detect the updated seqno part-way through the
1398 * GPU processing the request, we never over-estimate the
1399 * position of the head.
1401 request_ring_position = intel_ring_get_tail(ring);
1403 ret = ring->add_request(ring);
1405 kfree(request, M_DRM);
1409 request->seqno = intel_ring_get_seqno(ring);
1410 request->ring = ring;
1411 request->tail = request_ring_position;
1412 request->emitted_jiffies = jiffies;
1413 was_empty = list_empty(&ring->request_list);
1414 list_add_tail(&request->list, &ring->request_list);
1415 request->file_priv = NULL;
1418 struct drm_i915_file_private *file_priv = file->driver_priv;
1420 spin_lock(&file_priv->mm.lock);
1421 request->file_priv = file_priv;
1422 list_add_tail(&request->client_list,
1423 &file_priv->mm.request_list);
1424 spin_unlock(&file_priv->mm.lock);
1427 ring->outstanding_lazy_request = 0;
1429 if (!dev_priv->mm.suspended) {
1430 if (i915_enable_hangcheck) {
1431 mod_timer(&dev_priv->hangcheck_timer,
1432 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1435 queue_delayed_work(dev_priv->wq,
1436 &dev_priv->mm.retire_work,
1437 round_jiffies_up_relative(hz));
1438 intel_mark_busy(dev_priv->dev);
1443 *out_seqno = request->seqno;
1448 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1450 struct drm_i915_file_private *file_priv = request->file_priv;
1455 spin_lock(&file_priv->mm.lock);
1456 if (request->file_priv) {
1457 list_del(&request->client_list);
1458 request->file_priv = NULL;
1460 spin_unlock(&file_priv->mm.lock);
1463 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1464 struct intel_ring_buffer *ring)
1466 while (!list_empty(&ring->request_list)) {
1467 struct drm_i915_gem_request *request;
1469 request = list_first_entry(&ring->request_list,
1470 struct drm_i915_gem_request,
1473 list_del(&request->list);
1474 i915_gem_request_remove_from_client(request);
1475 drm_free(request, M_DRM);
1478 while (!list_empty(&ring->active_list)) {
1479 struct drm_i915_gem_object *obj;
1481 obj = list_first_entry(&ring->active_list,
1482 struct drm_i915_gem_object,
1485 i915_gem_object_move_to_inactive(obj);
1489 static void i915_gem_reset_fences(struct drm_device *dev)
1491 struct drm_i915_private *dev_priv = dev->dev_private;
1494 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1495 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1497 i915_gem_write_fence(dev, i, NULL);
1500 i915_gem_object_fence_lost(reg->obj);
1504 INIT_LIST_HEAD(&reg->lru_list);
1507 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1510 void i915_gem_reset(struct drm_device *dev)
1512 struct drm_i915_private *dev_priv = dev->dev_private;
1513 struct drm_i915_gem_object *obj;
1514 struct intel_ring_buffer *ring;
1517 for_each_ring(ring, dev_priv, i)
1518 i915_gem_reset_ring_lists(dev_priv, ring);
1520 /* Move everything out of the GPU domains to ensure we do any
1521 * necessary invalidation upon reuse.
1523 list_for_each_entry(obj,
1524 &dev_priv->mm.inactive_list,
1527 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1530 /* The fence registers are invalidated so clear them out */
1531 i915_gem_reset_fences(dev);
1535 * This function clears the request list as sequence numbers are passed.
1538 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1542 if (list_empty(&ring->request_list))
1545 WARN_ON(i915_verify_lists(ring->dev));
1547 seqno = ring->get_seqno(ring, true);
1549 while (!list_empty(&ring->request_list)) {
1550 struct drm_i915_gem_request *request;
1552 request = list_first_entry(&ring->request_list,
1553 struct drm_i915_gem_request,
1556 if (!i915_seqno_passed(seqno, request->seqno))
1559 /* We know the GPU must have read the request to have
1560 * sent us the seqno + interrupt, so use the position
1561 * of tail of the request to update the last known position
1564 ring->last_retired_head = request->tail;
1566 list_del(&request->list);
1567 i915_gem_request_remove_from_client(request);
1568 kfree(request, M_DRM);
1571 /* Move any buffers on the active list that are no longer referenced
1572 * by the ringbuffer to the flushing/inactive lists as appropriate.
1574 while (!list_empty(&ring->active_list)) {
1575 struct drm_i915_gem_object *obj;
1577 obj = list_first_entry(&ring->active_list,
1578 struct drm_i915_gem_object,
1581 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
1584 i915_gem_object_move_to_inactive(obj);
1587 if (unlikely(ring->trace_irq_seqno &&
1588 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1589 ring->irq_put(ring);
1590 ring->trace_irq_seqno = 0;
1596 i915_gem_retire_requests(struct drm_device *dev)
1598 drm_i915_private_t *dev_priv = dev->dev_private;
1599 struct intel_ring_buffer *ring;
1602 for_each_ring(ring, dev_priv, i)
1603 i915_gem_retire_requests_ring(ring);
1607 i915_gem_retire_work_handler(struct work_struct *work)
1609 drm_i915_private_t *dev_priv;
1610 struct drm_device *dev;
1611 struct intel_ring_buffer *ring;
1615 dev_priv = container_of(work, drm_i915_private_t,
1616 mm.retire_work.work);
1617 dev = dev_priv->dev;
1619 /* Come back later if the device is busy... */
1620 if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT)) {
1621 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
1622 round_jiffies_up_relative(hz));
1626 i915_gem_retire_requests(dev);
1628 /* Send a periodic flush down the ring so we don't hold onto GEM
1629 * objects indefinitely.
1632 for_each_ring(ring, dev_priv, i) {
1633 if (ring->gpu_caches_dirty)
1634 i915_add_request(ring, NULL, NULL);
1636 idle &= list_empty(&ring->request_list);
1639 if (!dev_priv->mm.suspended && !idle)
1640 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
1641 round_jiffies_up_relative(hz));
1643 intel_mark_idle(dev);
1648 * Ensures that an object will eventually get non-busy by flushing any required
1649 * write domains, emitting any outstanding lazy request and retiring any
1650 * completed requests.
1653 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
1658 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
1662 i915_gem_retire_requests_ring(obj->ring);
1669 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
1670 * @DRM_IOCTL_ARGS: standard ioctl arguments
1672 * Returns 0 if successful, else an error is returned with the remaining time in
1673 * the timeout parameter.
1674 * -ETIME: object is still busy after timeout
1675 * -ERESTARTSYS: signal interrupted the wait
1676 * -ENOENT: object doesn't exist
1677 * Also possible, but rare:
1678 * -EAGAIN: GPU wedged
1680 * -ENODEV: Internal IRQ fail
1681 * -E?: The add request failed
1683 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
1684 * non-zero timeout parameter the wait ioctl will wait for the given number of
1685 * nanoseconds on an object becoming unbusy. Since the wait itself does so
1686 * without holding struct_mutex the object may become re-busied before this
1687 * function completes. A similar but shorter race condition exists in the busy
1691 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
1693 struct drm_i915_gem_wait *args = data;
1694 struct drm_i915_gem_object *obj;
1695 struct intel_ring_buffer *ring = NULL;
1696 struct timespec timeout_stack, *timeout = NULL;
1700 if (args->timeout_ns >= 0) {
1701 timeout_stack = ns_to_timespec(args->timeout_ns);
1702 timeout = &timeout_stack;
1705 ret = i915_mutex_lock_interruptible(dev);
1709 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
1710 if (&obj->base == NULL) {
1715 /* Need to make sure the object gets inactive eventually. */
1716 ret = i915_gem_object_flush_active(obj);
1721 seqno = obj->last_read_seqno;
1728 /* Do this after OLR check to make sure we make forward progress polling
1729 * on this IOCTL with a 0 timeout (like busy ioctl)
1731 if (!args->timeout_ns) {
1736 drm_gem_object_unreference(&obj->base);
1739 ret = __wait_seqno(ring, seqno, true, timeout);
1741 WARN_ON(!timespec_valid(timeout));
1742 args->timeout_ns = timespec_to_ns(timeout);
1747 drm_gem_object_unreference(&obj->base);
1753 * i915_gem_object_sync - sync an object to a ring.
1755 * @obj: object which may be in use on another ring.
1756 * @to: ring we wish to use the object on. May be NULL.
1758 * This code is meant to abstract object synchronization with the GPU.
1759 * Calling with NULL implies synchronizing the object with the CPU
1760 * rather than a particular GPU ring.
1762 * Returns 0 if successful, else propagates up the lower layer error.
1765 i915_gem_object_sync(struct drm_i915_gem_object *obj,
1766 struct intel_ring_buffer *to)
1768 struct intel_ring_buffer *from = obj->ring;
1772 if (from == NULL || to == from)
1775 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
1776 return i915_gem_object_wait_rendering(obj, false);
1778 idx = intel_ring_sync_index(from, to);
1780 seqno = obj->last_read_seqno;
1781 if (seqno <= from->sync_seqno[idx])
1784 ret = i915_gem_check_olr(obj->ring, seqno);
1788 ret = to->sync_to(to, from, seqno);
1790 /* We use last_read_seqno because sync_to()
1791 * might have just caused seqno wrap under
1794 from->sync_seqno[idx] = obj->last_read_seqno;
1799 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
1801 u32 old_write_domain, old_read_domains;
1803 /* Act as a barrier for all accesses through the GTT */
1806 /* Force a pagefault for domain tracking on next user access */
1807 i915_gem_release_mmap(obj);
1809 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
1812 old_read_domains = obj->base.read_domains;
1813 old_write_domain = obj->base.write_domain;
1815 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
1816 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
1821 * Unbinds an object from the GTT aperture.
1824 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
1826 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1829 if (obj->gtt_space == NULL)
1835 ret = i915_gem_object_finish_gpu(obj);
1838 /* Continue on if we fail due to EIO, the GPU is hung so we
1839 * should be safe and we need to cleanup or else we might
1840 * cause memory corruption through use-after-free.
1843 i915_gem_object_finish_gtt(obj);
1845 /* Move the object to the CPU domain to ensure that
1846 * any possible CPU writes while it's not in the GTT
1847 * are flushed when we go to remap it.
1850 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1851 if (ret == -ERESTARTSYS)
1854 /* In the event of a disaster, abandon all caches and
1855 * hope for the best.
1857 i915_gem_clflush_object(obj);
1858 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1861 /* release the fence reg _after_ flushing */
1862 ret = i915_gem_object_put_fence(obj);
1866 if (obj->has_global_gtt_mapping)
1867 i915_gem_gtt_unbind_object(obj);
1868 if (obj->has_aliasing_ppgtt_mapping) {
1869 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
1870 obj->has_aliasing_ppgtt_mapping = 0;
1872 i915_gem_gtt_finish_object(obj);
1874 i915_gem_object_put_pages_gtt(obj);
1876 list_del_init(&obj->gtt_list);
1877 list_del_init(&obj->mm_list);
1878 /* Avoid an unnecessary call to unbind on rebind. */
1879 obj->map_and_fenceable = true;
1881 drm_mm_put_block(obj->gtt_space);
1882 obj->gtt_space = NULL;
1883 obj->gtt_offset = 0;
1885 if (i915_gem_object_is_purgeable(obj))
1886 i915_gem_object_truncate(obj);
1891 int i915_gpu_idle(struct drm_device *dev)
1893 drm_i915_private_t *dev_priv = dev->dev_private;
1894 struct intel_ring_buffer *ring;
1897 /* Flush everything onto the inactive list. */
1898 for_each_ring(ring, dev_priv, i) {
1899 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
1903 ret = intel_ring_idle(ring);
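/*
 * The per-generation fence register writers below pack the object's GTT
 * start/end, stride-derived pitch, tiling mode and a valid bit into the
 * hardware fence register format for that generation.
 */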
1911 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
1912 struct drm_i915_gem_object *obj)
1914 drm_i915_private_t *dev_priv = dev->dev_private;
1918 u32 size = obj->gtt_space->size;
1920 val = (uint64_t)((obj->gtt_offset + size - 4096) &
1922 val |= obj->gtt_offset & 0xfffff000;
1923 val |= (uint64_t)((obj->stride / 128) - 1) <<
1924 SANDYBRIDGE_FENCE_PITCH_SHIFT;
1926 if (obj->tiling_mode == I915_TILING_Y)
1927 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1928 val |= I965_FENCE_REG_VALID;
1932 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
1933 POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
1936 static void i965_write_fence_reg(struct drm_device *dev, int reg,
1937 struct drm_i915_gem_object *obj)
1939 drm_i915_private_t *dev_priv = dev->dev_private;
1943 u32 size = obj->gtt_space->size;
1945 val = (uint64_t)((obj->gtt_offset + size - 4096) &
1947 val |= obj->gtt_offset & 0xfffff000;
1948 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
1949 if (obj->tiling_mode == I915_TILING_Y)
1950 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1951 val |= I965_FENCE_REG_VALID;
1955 I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
1956 POSTING_READ(FENCE_REG_965_0 + reg * 8);
1959 static void i915_write_fence_reg(struct drm_device *dev, int reg,
1960 struct drm_i915_gem_object *obj)
1962 drm_i915_private_t *dev_priv = dev->dev_private;
1966 u32 size = obj->gtt_space->size;
1970 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
1971 (size & -size) != size ||
1972 (obj->gtt_offset & (size - 1)),
1973 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
1974 obj->gtt_offset, obj->map_and_fenceable, size);
1976 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
1981 /* Note: pitch better be a power of two tile widths */
1982 pitch_val = obj->stride / tile_width;
1983 pitch_val = ffs(pitch_val) - 1;
1985 val = obj->gtt_offset;
1986 if (obj->tiling_mode == I915_TILING_Y)
1987 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1988 val |= I915_FENCE_SIZE_BITS(size);
1989 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1990 val |= I830_FENCE_REG_VALID;
1995 reg = FENCE_REG_830_0 + reg * 4;
1997 reg = FENCE_REG_945_8 + (reg - 8) * 4;
1999 I915_WRITE(reg, val);
2003 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2004 struct drm_i915_gem_object *obj)
2006 drm_i915_private_t *dev_priv = dev->dev_private;
2010 u32 size = obj->gtt_space->size;
2013 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2014 (size & -size) != size ||
2015 (obj->gtt_offset & (size - 1)),
2016 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2017 obj->gtt_offset, size);
2019 pitch_val = obj->stride / 128;
2020 pitch_val = ffs(pitch_val) - 1;
2022 val = obj->gtt_offset;
2023 if (obj->tiling_mode == I915_TILING_Y)
2024 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2025 val |= I830_FENCE_SIZE_BITS(size);
2026 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2027 val |= I830_FENCE_REG_VALID;
2031 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2032 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2035 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2036 struct drm_i915_gem_object *obj)
2038 switch (INTEL_INFO(dev)->gen) {
2040 case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2042 case 4: i965_write_fence_reg(dev, reg, obj); break;
2043 case 3: i915_write_fence_reg(dev, reg, obj); break;
2044 case 2: i830_write_fence_reg(dev, reg, obj); break;
2049 static inline int fence_number(struct drm_i915_private *dev_priv,
2050 struct drm_i915_fence_reg *fence)
2052 return fence - dev_priv->fence_regs;
2055 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2056 struct drm_i915_fence_reg *fence,
2059 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2060 int reg = fence_number(dev_priv, fence);
2062 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2065 obj->fence_reg = reg;
2067 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2069 obj->fence_reg = I915_FENCE_REG_NONE;
2071 list_del_init(&fence->lru_list);
2076 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2078 if (obj->last_fenced_seqno) {
2079 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2083 obj->last_fenced_seqno = 0;
2086 /* Ensure that all CPU reads are completed before installing a fence
2087 * and all writes before removing the fence.
2089 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2092 obj->fenced_gpu_access = false;
2097 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2099 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2102 ret = i915_gem_object_flush_fence(obj);
2106 if (obj->fence_reg == I915_FENCE_REG_NONE)
2109 i915_gem_object_update_fence(obj,
2110 &dev_priv->fence_regs[obj->fence_reg],
2112 i915_gem_object_fence_lost(obj);
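/*
 * Find a fence register for reuse: prefer a register that is currently
 * free, otherwise fall back to stealing the least-recently-used register
 * from the fence LRU list.
 */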
2117 static struct drm_i915_fence_reg *
2118 i915_find_fence_reg(struct drm_device *dev)
2120 struct drm_i915_private *dev_priv = dev->dev_private;
2121 struct drm_i915_fence_reg *reg, *avail;
2124 /* First try to find a free reg */
2126 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2127 reg = &dev_priv->fence_regs[i];
2131 if (!reg->pin_count)
2138 /* None available, try to steal one or wait for a user to finish */
2139 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2150 * i915_gem_object_get_fence - set up fencing for an object
2151 * @obj: object to map through a fence reg
2153 * When mapping objects through the GTT, userspace wants to be able to write
2154 * to them without having to worry about swizzling if the object is tiled.
2155 * This function walks the fence regs looking for a free one for @obj,
2156 * stealing one if it can't find any.
2158 * It then sets up the reg based on the object's properties: address, pitch
2159 * and tiling format.
2161 * For an untiled surface, this removes any existing fence.
2164 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2166 struct drm_device *dev = obj->base.dev;
2167 struct drm_i915_private *dev_priv = dev->dev_private;
2168 bool enable = obj->tiling_mode != I915_TILING_NONE;
2169 struct drm_i915_fence_reg *reg;
2172 /* Have we updated the tiling parameters upon the object and so
2173 * will need to serialise the write to the associated fence register?
2175 if (obj->fence_dirty) {
2176 ret = i915_gem_object_flush_fence(obj);
2181 /* Just update our place in the LRU if our fence is getting reused. */
2182 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2183 reg = &dev_priv->fence_regs[obj->fence_reg];
2184 if (!obj->fence_dirty) {
2185 list_move_tail(&reg->lru_list,
2186 &dev_priv->mm.fence_list);
2189 } else if (enable) {
2190 reg = i915_find_fence_reg(dev);
2195 struct drm_i915_gem_object *old = reg->obj;
2197 ret = i915_gem_object_flush_fence(old);
2201 i915_gem_object_fence_lost(old);
2206 i915_gem_object_update_fence(obj, reg, enable);
2207 obj->fence_dirty = false;
2212 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2213 struct drm_mm_node *gtt_space,
2214 unsigned long cache_level)
2216 struct drm_mm_node *other;
2218 /* On non-LLC machines we have to be careful when putting differing
2219 * types of snoopable memory together to avoid the prefetcher
2220 * crossing memory domains and dying.
2225 if (gtt_space == NULL)
2228 if (list_empty(&gtt_space->node_list))
2231 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2232 if (other->allocated && !other->hole_follows && other->color != cache_level)
2235 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2236 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2242 static void i915_gem_verify_gtt(struct drm_device *dev)
2245 struct drm_i915_private *dev_priv = dev->dev_private;
2246 struct drm_i915_gem_object *obj;
2249 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2250 if (obj->gtt_space == NULL) {
2251 printk(KERN_ERR "object found on GTT list with no space reserved\n");
2256 if (obj->cache_level != obj->gtt_space->color) {
2257 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2258 obj->gtt_space->start,
2259 obj->gtt_space->start + obj->gtt_space->size,
2261 obj->gtt_space->color);
2266 if (!i915_gem_valid_gtt_space(dev,
2268 obj->cache_level)) {
2269 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2270 obj->gtt_space->start,
2271 obj->gtt_space->start + obj->gtt_space->size,
2283 * Finds free space in the GTT aperture and binds the object there.
2286 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2288 bool map_and_fenceable,
2291 struct drm_device *dev = obj->base.dev;
2292 drm_i915_private_t *dev_priv = dev->dev_private;
2293 struct drm_mm_node *free_space;
2294 uint32_t size, fence_size, fence_alignment, unfenced_alignment;
2295 bool mappable, fenceable;
2298 if (obj->madv != I915_MADV_WILLNEED) {
2299 DRM_ERROR("Attempting to bind a purgeable object\n");
2303 fence_size = i915_gem_get_gtt_size(dev, obj->base.size,
2305 fence_alignment = i915_gem_get_gtt_alignment(dev, obj->base.size,
2307 unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(dev,
2308 obj->base.size, obj->tiling_mode);
2310 alignment = map_and_fenceable ? fence_alignment :
2312 if (map_and_fenceable && (alignment & (fence_alignment - 1)) != 0) {
2313 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2317 size = map_and_fenceable ? fence_size : obj->base.size;
2319 /* If the object is bigger than the entire aperture, reject it early
2320 * before evicting everything in a vain attempt to find space.
2322 if (obj->base.size > (map_and_fenceable ?
2323 dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2325 "Attempting to bind an object larger than the aperture\n");
2330 if (map_and_fenceable)
2332 drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
2333 size, alignment, obj->cache_level,
2334 0, dev_priv->mm.gtt_mappable_end,
2337 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
2338 size, alignment, obj->cache_level,
2341 if (free_space != NULL) {
2342 if (map_and_fenceable)
2344 drm_mm_get_block_range_generic(free_space,
2345 size, alignment, obj->cache_level,
2346 0, dev_priv->mm.gtt_mappable_end,
2350 drm_mm_get_block_generic(free_space,
2351 size, alignment, obj->cache_level,
2354 if (obj->gtt_space == NULL) {
2355 ret = i915_gem_evict_something(dev, size, alignment,
2366 * NOTE: i915_gem_object_get_pages_gtt() cannot
2367 * return ENOMEM, since we used VM_ALLOC_RETRY.
2369 ret = i915_gem_object_get_pages_gtt(obj);
2371 drm_mm_put_block(obj->gtt_space);
2372 obj->gtt_space = NULL;
2376 i915_gem_gtt_bind_object(obj, obj->cache_level);
2378 i915_gem_object_put_pages_gtt(obj);
2379 drm_mm_put_block(obj->gtt_space);
2380 obj->gtt_space = NULL;
2381 if (i915_gem_evict_everything(dev))
2386 list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
2387 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2389 obj->gtt_offset = obj->gtt_space->start;
2392 obj->gtt_space->size == fence_size &&
2393 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2396 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2397 obj->map_and_fenceable = mappable && fenceable;
2399 i915_gem_verify_gtt(dev);
2404 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2407 /* If we don't have a page list set up, then we're not pinned
2408 * to GPU, and we can ignore the cache flush because it'll happen
2409 * again at bind time.
2411 if (obj->pages == NULL)
2414 /* If the GPU is snooping the contents of the CPU cache,
2415 * we do not need to manually clear the CPU cache lines. However,
2416 * the caches are only snooped when the render cache is
2417 * flushed/invalidated. As we always have to emit invalidations
2418 * and flushes when moving into and out of the RENDER domain, correct
2419 * snooping behaviour occurs naturally as the result of our domain
2422 if (obj->cache_level != I915_CACHE_NONE)
2425 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2428 /** Flushes the GTT write domain for the object if it's dirty. */
2430 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2432 uint32_t old_write_domain;
2434 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2437 /* No actual flushing is required for the GTT write domain. Writes
2438 * to it immediately go to main memory as far as we know, so there's
2439 * no chipset flush. It also doesn't land in render cache.
2441 * However, we do have to enforce the order so that all writes through
2442 * the GTT land before any writes to the device, such as updates to
2447 old_write_domain = obj->base.write_domain;
2448 obj->base.write_domain = 0;
2451 /** Flushes the CPU write domain for the object if it's dirty. */
2453 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2455 uint32_t old_write_domain;
2457 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2460 i915_gem_clflush_object(obj);
2461 intel_gtt_chipset_flush();
2462 old_write_domain = obj->base.write_domain;
2463 obj->base.write_domain = 0;
2467 * Moves a single object to the GTT read, and possibly write domain.
2469 * This function returns when the move is complete, including waiting on
2473 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2475 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2476 uint32_t old_write_domain, old_read_domains;
2479 /* Not valid to be called on unbound objects. */
2480 if (obj->gtt_space == NULL)
2483 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2486 ret = i915_gem_object_wait_rendering(obj, !write);
2490 i915_gem_object_flush_cpu_write_domain(obj);
2492 old_write_domain = obj->base.write_domain;
2493 old_read_domains = obj->base.read_domains;
2495 /* It should now be out of any other write domains, and we can update
2496 * the domain values for our changes.
2498 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2499 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2501 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2502 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2506 /* And bump the LRU for this access */
2507 if (i915_gem_object_is_inactive(obj))
2508 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2513 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2514 enum i915_cache_level cache_level)
2516 struct drm_device *dev = obj->base.dev;
2517 drm_i915_private_t *dev_priv = dev->dev_private;
2520 if (obj->cache_level == cache_level)
2523 if (obj->pin_count) {
2524 DRM_DEBUG("cannot change the cache level of pinned objects\n");
2528 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
2529 ret = i915_gem_object_unbind(obj);
2534 if (obj->gtt_space) {
2535 ret = i915_gem_object_finish_gpu(obj);
2539 i915_gem_object_finish_gtt(obj);
2541 /* Before SandyBridge, you could not use tiling or fence
2542 * registers with snooped memory, so relinquish any fences
2543 * currently pointing to our region in the aperture.
2545 if (INTEL_INFO(dev)->gen < 6) {
2546 ret = i915_gem_object_put_fence(obj);
2551 if (obj->has_global_gtt_mapping)
2552 i915_gem_gtt_bind_object(obj, cache_level);
2553 if (obj->has_aliasing_ppgtt_mapping)
2554 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2557 obj->gtt_space->color = cache_level;
2560 if (cache_level == I915_CACHE_NONE) {
2561 u32 old_read_domains, old_write_domain;
2563 /* If we're coming from LLC cached, then we haven't
2564 * actually been tracking whether the data is in the
2565 * CPU cache or not, since we only allow one bit set
2566 * in obj->write_domain and have been skipping the clflushes.
2567 * Just set it to the CPU cache for now.
2569 KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
2570 ("obj %p in CPU write domain", obj));
2571 KASSERT((obj->base.read_domains & ~I915_GEM_DOMAIN_CPU) == 0,
2572 ("obj %p in CPU read domain", obj));
2574 old_read_domains = obj->base.read_domains;
2575 old_write_domain = obj->base.write_domain;
2577 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2578 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2582 obj->cache_level = cache_level;
2583 i915_gem_verify_gtt(dev);
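/*
 * Changing the cache level of a bound object above follows a fixed order:
 * wait for the GPU, tear down the GTT mapping, drop any fence on pre-gen6
 * parts (fences cannot be used with snooped memory there), rewrite the
 * global/aliasing PTEs, and finally update the node's colour.  The tiny
 * helper below captures only the fence rule; its inputs are assumptions of
 * this sketch.
 */
#if 0	/* illustrative sketch, not part of the driver build */
#include <stdbool.h>

static bool
must_drop_fence_on_cache_change(int gen, bool becoming_snooped)
{
	/* Before SandyBridge, tiling fences and snooped memory don't mix. */
	return (gen < 6 && becoming_snooped);
}
#endif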
2587 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
2588 struct drm_file *file)
2590 struct drm_i915_gem_caching *args = data;
2591 struct drm_i915_gem_object *obj;
2594 ret = i915_mutex_lock_interruptible(dev);
2598 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2599 if (&obj->base == NULL) {
2604 args->caching = obj->cache_level != I915_CACHE_NONE;
2606 drm_gem_object_unreference(&obj->base);
2612 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
2613 struct drm_file *file)
2615 struct drm_i915_gem_caching *args = data;
2616 struct drm_i915_gem_object *obj;
2617 enum i915_cache_level level;
2620 switch (args->caching) {
2621 case I915_CACHING_NONE:
2622 level = I915_CACHE_NONE;
2624 case I915_CACHING_CACHED:
2625 level = I915_CACHE_LLC;
2631 ret = i915_mutex_lock_interruptible(dev);
2635 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2636 if (&obj->base == NULL) {
2641 ret = i915_gem_object_set_cache_level(obj, level);
2643 drm_gem_object_unreference(&obj->base);
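/*
 * A hedged userspace sketch of driving the two caching ioctls above,
 * assuming the standard Linux-style i915 uAPI names (drm_i915_gem_caching,
 * DRM_IOCTL_I915_GEM_SET_CACHING/GET_CACHING, I915_CACHING_CACHED); the
 * headers shipped with this port may differ.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int
make_bo_snooped(int fd, uint32_t handle)
{
	struct drm_i915_gem_caching arg = {
		.handle = handle,
		.caching = I915_CACHING_CACHED,	/* request LLC/snooped */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) != 0)
		return -1;

	/* Read it back: 0 means uncached, non-zero means cached. */
	arg.caching = 0;
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg) != 0)
		return -1;
	printf("handle %u caching = %u\n", handle, arg.caching);
	return 0;
}
#endif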
2650 * Prepare buffer for display plane (scanout, cursors, etc).
2651 * Can be called from an uninterruptible phase (modesetting) and allows
2652 * any flushes to be pipelined (for pageflips).
2655 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2657 struct intel_ring_buffer *pipelined)
2659 u32 old_read_domains, old_write_domain;
2662 if (pipelined != obj->ring) {
2663 ret = i915_gem_object_sync(obj, pipelined);
2668 /* The display engine is not coherent with the LLC cache on gen6. As
2669 * a result, we make sure that the pinning that is about to occur is
2670 * done with uncached PTEs. This is the lowest common denominator for all chipsets.
2673 * However for gen6+, we could do better by using the GFDT bit instead
2674 * of uncaching, which would allow us to flush all the LLC-cached data
2675 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
2677 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
2681 /* As the user may map the buffer once pinned in the display plane
2682 * (e.g. libkms for the bootup splash), we have to ensure that we
2683 * always use map_and_fenceable for all scanout buffers.
2685 ret = i915_gem_object_pin(obj, alignment, true, false);
2689 i915_gem_object_flush_cpu_write_domain(obj);
2691 old_write_domain = obj->base.write_domain;
2692 old_read_domains = obj->base.read_domains;
2694 /* It should now be out of any other write domains, and we can update
2695 * the domain values for our changes.
2697 obj->base.write_domain = 0;
2698 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
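/*
 * The ordering used by the display-plane pin path above, captured as data
 * so it can be printed or asserted against in a test harness; the step
 * descriptions are this sketch's own wording, not driver strings.
 */
#if 0	/* illustrative sketch, not part of the driver build */
static const char *const scanout_pin_steps[] = {
	"synchronise the object with the pipelined (display) ring",
	"force uncached PTEs: the display engine does not snoop the LLC",
	"pin map_and_fenceable so a later mmap of the scanout works",
	"flush any pending CPU-domain writes",
	"mark the object readable through the GTT domain",
};
#endif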
2704 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
2708 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
2711 ret = i915_gem_object_wait_rendering(obj, false);
2715 /* Ensure that we invalidate the GPU's caches and TLBs. */
2716 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2721 * Moves a single object to the CPU read, and possibly write domain.
2723 * This function returns when the move is complete, including waiting on flushes to occur.
2727 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
2729 uint32_t old_write_domain, old_read_domains;
2732 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
2735 ret = i915_gem_object_wait_rendering(obj, !write);
2739 i915_gem_object_flush_gtt_write_domain(obj);
2741 old_write_domain = obj->base.write_domain;
2742 old_read_domains = obj->base.read_domains;
2744 /* Flush the CPU cache if it's still invalid. */
2745 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2746 i915_gem_clflush_object(obj);
2748 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2751 /* It should now be out of any other write domains, and we can update
2752 * the domain values for our changes.
2754 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2756 /* If we're writing through the CPU, then the GPU read domains will
2757 * need to be invalidated at next use.
2760 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2761 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2767 /* Throttle our rendering by waiting until the ring has completed our requests
2768 * emitted over 20 msec ago.
2770 * Note that if we were to use the current jiffies each time around the loop,
2771 * we wouldn't escape the function with any frames outstanding if the time to
2772 * render a frame was over 20ms.
2774 * This should get us reasonable parallelism between CPU and GPU but also
2775 * relatively low latency when blocking on a particular request to finish.
2778 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
2780 struct drm_i915_private *dev_priv = dev->dev_private;
2781 struct drm_i915_file_private *file_priv = file->driver_priv;
2782 unsigned long recent_enough = ticks - (20 * hz / 1000);
2783 struct drm_i915_gem_request *request;
2784 struct intel_ring_buffer *ring = NULL;
2788 if (atomic_read(&dev_priv->mm.wedged))
2791 spin_lock(&file_priv->mm.lock);
2792 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
2793 if (time_after_eq(request->emitted_jiffies, recent_enough))
2796 ring = request->ring;
2797 seqno = request->seqno;
2799 spin_unlock(&file_priv->mm.lock);
2804 ret = __wait_seqno(ring, seqno, true, NULL);
2807 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
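/*
 * The 20 ms throttle window above in isolation.  "hz" is the tick rate and
 * "now" the current tick counter (the driver uses the global "ticks");
 * requests emitted inside the window are left alone, older ones are waited
 * for.  The helper is a sketch, not driver code.
 */
#if 0	/* illustrative sketch, not part of the driver build */
#include <stdbool.h>

static bool
request_is_recent(long emitted, long now, long hz)
{
	long recent_enough = now - (20 * hz / 1000);

	/* Same sense as time_after_eq(emitted, recent_enough), written in a
	 * wrap-safe way. */
	return (emitted - recent_enough) >= 0;
}
#endif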
2813 i915_gem_object_pin(struct drm_i915_gem_object *obj,
2815 bool map_and_fenceable,
2820 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
2823 if (obj->gtt_space != NULL) {
2824 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
2825 (map_and_fenceable && !obj->map_and_fenceable)) {
2826 WARN(obj->pin_count,
2827 "bo is already pinned with incorrect alignment:"
2828 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
2829 " obj->map_and_fenceable=%d\n",
2830 obj->gtt_offset, alignment,
2832 obj->map_and_fenceable);
2833 ret = i915_gem_object_unbind(obj);
2839 if (obj->gtt_space == NULL) {
2840 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2842 ret = i915_gem_object_bind_to_gtt(obj, alignment,
2848 if (!dev_priv->mm.aliasing_ppgtt)
2849 i915_gem_gtt_bind_object(obj, obj->cache_level);
2852 if (!obj->has_global_gtt_mapping && map_and_fenceable)
2853 i915_gem_gtt_bind_object(obj, obj->cache_level);
2856 obj->pin_mappable |= map_and_fenceable;
2862 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
2864 BUG_ON(obj->pin_count == 0);
2865 BUG_ON(obj->gtt_space == NULL);
2867 if (--obj->pin_count == 0)
2868 obj->pin_mappable = false;
2872 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2873 struct drm_file *file)
2875 struct drm_i915_gem_pin *args = data;
2876 struct drm_i915_gem_object *obj;
2879 ret = i915_mutex_lock_interruptible(dev);
2883 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2884 if (&obj->base == NULL) {
2889 if (obj->madv != I915_MADV_WILLNEED) {
2890 DRM_ERROR("Attempting to pin a purgeable buffer\n");
2895 if (obj->pin_filp != NULL && obj->pin_filp != file) {
2896 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
2902 if (obj->user_pin_count == 0) {
2903 ret = i915_gem_object_pin(obj, args->alignment, true, false);
2908 obj->user_pin_count++;
2909 obj->pin_filp = file;
2911 /* XXX - flush the CPU caches for pinned objects
2912 * as the X server doesn't manage domains yet
2914 i915_gem_object_flush_cpu_write_domain(obj);
2915 args->offset = obj->gtt_offset;
2917 drm_gem_object_unreference(&obj->base);
2924 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2925 struct drm_file *file)
2927 struct drm_i915_gem_pin *args = data;
2928 struct drm_i915_gem_object *obj;
2931 ret = i915_mutex_lock_interruptible(dev);
2935 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2936 if (&obj->base == NULL) {
2941 if (obj->pin_filp != file) {
2942 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
2947 obj->user_pin_count--;
2948 if (obj->user_pin_count == 0) {
2949 obj->pin_filp = NULL;
2950 i915_gem_object_unpin(obj);
2954 drm_gem_object_unreference(&obj->base);
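/*
 * A hedged userspace sketch of the legacy uAPI served by the pin/unpin
 * ioctls above, assuming the standard Linux-style structure layouts
 * (drm_i915_gem_pin/drm_i915_gem_unpin); historically only the X server
 * (the DRM master) used these.  On success the kernel reports the object's
 * GTT offset.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int
pin_bo(int fd, uint32_t handle, uint64_t alignment, uint64_t *offset_out)
{
	struct drm_i915_gem_pin pin = {
		.handle = handle,
		.alignment = alignment,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin) != 0)
		return -1;
	*offset_out = pin.offset;
	return 0;
}

static int
unpin_bo(int fd, uint32_t handle)
{
	struct drm_i915_gem_unpin unpin = { .handle = handle };

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
}
#endif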
2961 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2962 struct drm_file *file)
2964 struct drm_i915_gem_busy *args = data;
2965 struct drm_i915_gem_object *obj;
2968 ret = i915_mutex_lock_interruptible(dev);
2972 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2973 if (&obj->base == NULL) {
2978 /* Count all active objects as busy, even if they are currently not used
2979 * by the gpu. Users of this interface expect objects to eventually
2980 * become non-busy without any further actions, therefore emit any
2981 * necessary flushes here.
2983 ret = i915_gem_object_flush_active(obj);
2985 args->busy = obj->active;
2987 args->busy |= intel_ring_flag(obj->ring) << 16;
2990 drm_gem_object_unreference(&obj->base);
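/*
 * A hedged userspace sketch of polling the busy ioctl above, assuming the
 * standard Linux-style drm_i915_gem_busy layout.  Bit 0 reports whether
 * the object is still active on the GPU; the upper bits encode the ring
 * that last used it (the exact shift is driver-version specific).
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static bool
bo_is_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
		return true;	/* be conservative on error */
	return (busy.busy & 1) != 0;
}
#endif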
2997 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2998 struct drm_file *file_priv)
3000 return i915_gem_ring_throttle(dev, file_priv);
3004 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3005 struct drm_file *file_priv)
3007 struct drm_i915_gem_madvise *args = data;
3008 struct drm_i915_gem_object *obj;
3011 switch (args->madv) {
3012 case I915_MADV_DONTNEED:
3013 case I915_MADV_WILLNEED:
3019 ret = i915_mutex_lock_interruptible(dev);
3023 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3024 if (&obj->base == NULL) {
3029 if (obj->pin_count) {
3034 if (obj->madv != __I915_MADV_PURGED)
3035 obj->madv = args->madv;
3037 /* if the object is no longer attached, discard its backing storage */
3038 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3039 i915_gem_object_truncate(obj);
3041 args->retained = obj->madv != __I915_MADV_PURGED;
3044 drm_gem_object_unreference(&obj->base);
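/*
 * A hedged userspace sketch of how a buffer-object cache typically uses
 * the madvise ioctl above, assuming the standard Linux-style uAPI names.
 * DONTNEED marks an idle buffer purgeable; when the buffer is reused,
 * WILLNEED is set again and "retained" tells the caller whether the
 * backing storage survived or was purged in the meantime.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Returns true if the backing pages are still there. */
static bool
bo_madvise(int fd, uint32_t handle, uint32_t madv)
{
	struct drm_i915_gem_madvise arg = {
		.handle = handle,
		.madv = madv,	/* I915_MADV_WILLNEED or I915_MADV_DONTNEED */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg) != 0)
		return false;
	return arg.retained != 0;
}
#endif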
3050 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3051 const struct drm_i915_gem_object_ops *ops)
3053 INIT_LIST_HEAD(&obj->mm_list);
3054 INIT_LIST_HEAD(&obj->gtt_list);
3055 INIT_LIST_HEAD(&obj->ring_list);
3056 INIT_LIST_HEAD(&obj->exec_list);
3060 obj->fence_reg = I915_FENCE_REG_NONE;
3061 obj->madv = I915_MADV_WILLNEED;
3062 /* Avoid an unnecessary call to unbind on the first bind. */
3063 obj->map_and_fenceable = true;
3065 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3068 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3069 .get_pages = i915_gem_object_get_pages_gtt,
3070 .put_pages = i915_gem_object_put_pages_gtt,
3073 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3076 struct drm_i915_gem_object *obj;
3078 struct address_space *mapping;
3082 obj = kmalloc(sizeof(*obj), M_DRM, M_WAITOK | M_ZERO);
3086 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3092 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3093 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3094 /* 965gm cannot relocate objects above 4GiB. */
3095 mask &= ~__GFP_HIGHMEM;
3096 mask |= __GFP_DMA32;
3099 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3100 mapping_set_gfp_mask(mapping, mask);
3103 i915_gem_object_init(obj, &i915_gem_object_ops);
3105 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3106 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3109 /* On some devices, we can have the GPU use the LLC (the CPU
3110 * cache) for about a 10% performance improvement
3111 * compared to uncached. Graphics requests other than
3112 * display scanout are coherent with the CPU in
3113 * accessing this cache. This means in this mode we
3114 * don't need to clflush on the CPU side, and on the
3115 * GPU side we only need to flush internal caches to
3116 * get data visible to the CPU.
3118 * However, we maintain the display planes as UC, and so
3119 * need to rebind when first used as such.
3121 obj->cache_level = I915_CACHE_LLC;
3123 obj->cache_level = I915_CACHE_NONE;
3128 int i915_gem_init_object(struct drm_gem_object *obj)
3135 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3137 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3138 struct drm_device *dev = obj->base.dev;
3139 drm_i915_private_t *dev_priv = dev->dev_private;
3142 i915_gem_detach_phys_object(dev, obj);
3145 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3146 bool was_interruptible;
3148 was_interruptible = dev_priv->mm.interruptible;
3149 dev_priv->mm.interruptible = false;
3151 WARN_ON(i915_gem_object_unbind(obj));
3153 dev_priv->mm.interruptible = was_interruptible;
3156 drm_gem_free_mmap_offset(&obj->base);
3158 drm_gem_object_release(&obj->base);
3159 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3161 drm_free(obj->bit_17, M_DRM);
3162 drm_free(obj, M_DRM);
3166 i915_gem_do_init(struct drm_device *dev, unsigned long start,
3167 unsigned long mappable_end, unsigned long end)
3169 drm_i915_private_t *dev_priv;
3170 unsigned long mappable;
3173 dev_priv = dev->dev_private;
3174 mappable = min(end, mappable_end) - start;
3176 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
3178 dev_priv->mm.gtt_start = start;
3179 dev_priv->mm.gtt_mappable_end = mappable_end;
3180 dev_priv->mm.gtt_end = end;
3181 dev_priv->mm.gtt_total = end - start;
3182 dev_priv->mm.mappable_gtt_total = mappable;
3184 /* Take over this portion of the GTT */
3185 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
3186 device_printf(dev->dev,
3187 "taking over the fictitious range 0x%lx-0x%lx\n",
3188 dev->agp->base + start, dev->agp->base + start + mappable);
3189 error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
3190 dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
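/*
 * The aperture accounting performed by i915_gem_do_init() above, in
 * isolation: GEM manages [start, end), of which only the part below
 * mappable_end is CPU-visible through the aperture BAR.  Callers such as
 * i915_gem_init() additionally shave PPGTT page-directory entries and a
 * one-page guard off "end" before calling in.  A sketch under those
 * assumptions:
 */
#if 0	/* illustrative sketch, not part of the driver build */
#include <stdint.h>

struct gtt_layout {
	uint64_t total;		/* bytes managed by GEM */
	uint64_t mappable;	/* CPU-visible bytes    */
};

static struct gtt_layout
account_gtt(uint64_t start, uint64_t mappable_end, uint64_t end)
{
	struct gtt_layout l;

	l.total = end - start;
	l.mappable = (end < mappable_end ? end : mappable_end) - start;
	return l;
}
#endif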
3195 i915_gem_idle(struct drm_device *dev)
3197 drm_i915_private_t *dev_priv = dev->dev_private;
3202 if (dev_priv->mm.suspended) {
3207 ret = i915_gpu_idle(dev);
3212 i915_gem_retire_requests(dev);
3214 /* Under UMS, be paranoid and evict. */
3215 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3216 i915_gem_evict_everything(dev);
3218 i915_gem_reset_fences(dev);
3220 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3221 * We need to replace this with a semaphore, or something.
3222 * And not confound mm.suspended!
3224 dev_priv->mm.suspended = 1;
3225 del_timer_sync(&dev_priv->hangcheck_timer);
3227 i915_kernel_lost_context(dev);
3228 i915_gem_cleanup_ringbuffer(dev);
3232 /* Cancel the retire work handler, which should be idle now. */
3233 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3238 void i915_gem_l3_remap(struct drm_device *dev)
3240 drm_i915_private_t *dev_priv = dev->dev_private;
3244 if (!HAS_L3_GPU_CACHE(dev))
3247 if (!dev_priv->l3_parity.remap_info)
3250 misccpctl = I915_READ(GEN7_MISCCPCTL);
3251 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3252 POSTING_READ(GEN7_MISCCPCTL);
3254 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3255 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3256 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
3257 DRM_DEBUG("0x%x was already programmed to %x\n",
3258 GEN7_L3LOG_BASE + i, remap);
3259 if (remap && !dev_priv->l3_parity.remap_info[i/4])
3260 DRM_DEBUG_DRIVER("Clearing remapped register\n");
3261 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
3264 /* Make sure all the writes land before disabling dop clock gating */
3265 POSTING_READ(GEN7_L3LOG_BASE);
3267 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3270 void i915_gem_init_swizzling(struct drm_device *dev)
3272 drm_i915_private_t *dev_priv = dev->dev_private;
3274 if (INTEL_INFO(dev)->gen < 5 ||
3275 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3278 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3279 DISP_TILE_SURFACE_SWIZZLING);
3284 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3286 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3288 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3292 intel_enable_blt(struct drm_device *dev)
3299 /* The blitter was dysfunctional on early prototypes */
3300 revision = pci_read_config(dev->dev, PCIR_REVID, 1);
3301 if (IS_GEN6(dev) && revision < 8) {
3302 DRM_INFO("BLT not supported on this pre-production hardware;"
3303 " graphics performance will be degraded.\n");
3311 i915_gem_init_hw(struct drm_device *dev)
3313 drm_i915_private_t *dev_priv = dev->dev_private;
3316 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
3317 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
3319 i915_gem_l3_remap(dev);
3321 i915_gem_init_swizzling(dev);
3323 ret = intel_init_render_ring_buffer(dev);
3328 ret = intel_init_bsd_ring_buffer(dev);
3330 goto cleanup_render_ring;
3333 if (intel_enable_blt(dev)) {
3334 ret = intel_init_blt_ring_buffer(dev);
3336 goto cleanup_bsd_ring;
3339 dev_priv->next_seqno = 1;
3342 * XXX: There was some w/a described somewhere suggesting loading
3343 * contexts before PPGTT.
3345 i915_gem_context_init(dev);
3346 i915_gem_init_ppgtt(dev);
3351 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3352 cleanup_render_ring:
3353 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3358 intel_enable_ppgtt(struct drm_device *dev)
3360 if (i915_enable_ppgtt >= 0)
3361 return i915_enable_ppgtt;
3363 /* Disable ppgtt on SNB if VT-d is on. */
3364 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
3370 int i915_gem_init(struct drm_device *dev)
3372 struct drm_i915_private *dev_priv = dev->dev_private;
3373 unsigned long prealloc_size, gtt_size, mappable_size;
3376 prealloc_size = dev_priv->mm.gtt->stolen_size;
3377 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3378 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3380 /* Basic memrange allocator for stolen space */
3381 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
3384 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
3385 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3386 * aperture accordingly when using aliasing ppgtt. */
3387 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
3388 /* For paranoia keep the guard page in between. */
3389 gtt_size -= PAGE_SIZE;
3391 i915_gem_do_init(dev, 0, mappable_size, gtt_size);
3393 ret = i915_gem_init_aliasing_ppgtt(dev);
3399 /* Let GEM Manage all of the aperture.
3401 * However, leave one page at the end still bound to the scratch
3402 * page. There are a number of places where the hardware
3403 * apparently prefetches past the end of the object, and we've
3404 * seen multiple hangs with the GPU head pointer stuck in a
3405 * batchbuffer bound at the last page of the aperture. One page
3406 * should be enough to keep any prefetching inside of the aperture.
3409 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
3412 ret = i915_gem_init_hw(dev);
3415 i915_gem_cleanup_aliasing_ppgtt(dev);
3420 /* Try to set up FBC with a reasonable compressed buffer size */
3421 if (I915_HAS_FBC(dev) && i915_powersave) {
3424 /* Leave 1M for line length buffer & misc. */
3426 /* Try to get a 32M buffer... */
3427 if (prealloc_size > (36*1024*1024))
3428 cfb_size = 32*1024*1024;
3429 else /* fall back to 7/8 of the stolen space */
3430 cfb_size = prealloc_size * 7 / 8;
3431 i915_setup_compression(dev, cfb_size);
3435 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
3436 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3437 dev_priv->dri1.allow_batchbuffer = 1;
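/*
 * The compressed-framebuffer sizing policy used by i915_gem_init() above,
 * in isolation: aim for a 32 MiB buffer when stolen memory is comfortably
 * larger (36 MiB, leaving roughly 1 MiB for the line length buffer and
 * miscellany), otherwise fall back to 7/8 of the stolen space.
 */
#if 0	/* illustrative sketch, not part of the driver build */
#include <stdint.h>

static uint64_t
pick_cfb_size(uint64_t prealloc_size)
{
	if (prealloc_size > 36ull * 1024 * 1024)
		return 32ull * 1024 * 1024;
	return prealloc_size * 7 / 8;
}
#endif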
3442 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3444 drm_i915_private_t *dev_priv = dev->dev_private;
3445 struct intel_ring_buffer *ring;
3448 for_each_ring(ring, dev_priv, i)
3449 intel_cleanup_ring_buffer(ring);
3453 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3454 struct drm_file *file_priv)
3456 drm_i915_private_t *dev_priv = dev->dev_private;
3459 if (drm_core_check_feature(dev, DRIVER_MODESET))
3462 if (atomic_read(&dev_priv->mm.wedged)) {
3463 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3464 atomic_set(&dev_priv->mm.wedged, 0);
3468 dev_priv->mm.suspended = 0;
3470 ret = i915_gem_init_hw(dev);
3476 KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
3479 ret = drm_irq_install(dev);
3481 goto cleanup_ringbuffer;
3487 i915_gem_cleanup_ringbuffer(dev);
3488 dev_priv->mm.suspended = 1;
3495 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3496 struct drm_file *file_priv)
3498 if (drm_core_check_feature(dev, DRIVER_MODESET))
3501 drm_irq_uninstall(dev);
3502 return i915_gem_idle(dev);
3506 i915_gem_lastclose(struct drm_device *dev)
3510 if (drm_core_check_feature(dev, DRIVER_MODESET))
3513 ret = i915_gem_idle(dev);
3515 DRM_ERROR("failed to idle hardware: %d\n", ret);
3519 init_ring_lists(struct intel_ring_buffer *ring)
3521 INIT_LIST_HEAD(&ring->active_list);
3522 INIT_LIST_HEAD(&ring->request_list);
3526 i915_gem_load(struct drm_device *dev)
3529 drm_i915_private_t *dev_priv = dev->dev_private;
3531 INIT_LIST_HEAD(&dev_priv->mm.active_list);
3532 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3533 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3534 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
3535 for (i = 0; i < I915_NUM_RINGS; i++)
3536 init_ring_lists(&dev_priv->ring[i]);
3537 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3538 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3539 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3540 i915_gem_retire_work_handler);
3541 init_completion(&dev_priv->error_completion);
3543 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3545 I915_WRITE(MI_ARB_STATE,
3546 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
3549 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3551 /* Old X drivers will take 0-2 for front, back, depth buffers */
3552 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3553 dev_priv->fence_reg_start = 3;
3555 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3556 dev_priv->num_fence_regs = 16;
3558 dev_priv->num_fence_regs = 8;
3560 /* Initialize fence registers to zero */
3561 i915_gem_reset_fences(dev);
3563 i915_gem_detect_bit_6_swizzle(dev);
3564 init_waitqueue_head(&dev_priv->pending_flip_queue);
3566 dev_priv->mm.interruptible = true;
3569 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3570 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3571 register_shrinker(&dev_priv->mm.inactive_shrinker);
3573 dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
3574 i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
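/*
 * The fence-register bookkeeping chosen in i915_gem_load() above, in
 * isolation: gen4+ parts (plus 945G/945GM/G33) expose 16 fence registers,
 * older ones only 8, and under UMS the first three are reserved because
 * old X drivers hard-code them for the front, back and depth buffers.
 * The helpers below are a sketch with assumed inputs.
 */
#if 0	/* illustrative sketch, not part of the driver build */
#include <stdbool.h>

static int
num_fence_regs(int gen, bool is_945_or_g33)
{
	return (gen >= 4 || is_945_or_g33) ? 16 : 8;
}

static int
first_usable_fence_reg(bool modeset_enabled)
{
	return modeset_enabled ? 0 : 3;
}
#endif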
3579 * Create a physically contiguous memory object for this object
3580 * e.g. for cursor + overlay regs
3582 static int i915_gem_init_phys_object(struct drm_device *dev,
3583 int id, int size, int align)
3585 drm_i915_private_t *dev_priv = dev->dev_private;
3586 struct drm_i915_gem_phys_object *phys_obj;
3589 if (dev_priv->mm.phys_objs[id - 1] || !size)
3592 phys_obj = kmalloc(sizeof(struct drm_i915_gem_phys_object), M_DRM,
3599 phys_obj->handle = drm_pci_alloc(dev, size, align, ~0);
3600 if (!phys_obj->handle) {
3604 pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
3605 size / PAGE_SIZE, PAT_WRITE_COMBINING);
3607 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3612 drm_free(phys_obj, M_DRM);
3616 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
3618 drm_i915_private_t *dev_priv = dev->dev_private;
3619 struct drm_i915_gem_phys_object *phys_obj;
3621 if (!dev_priv->mm.phys_objs[id - 1])
3624 phys_obj = dev_priv->mm.phys_objs[id - 1];
3625 if (phys_obj->cur_obj) {
3626 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3629 drm_pci_free(dev, phys_obj->handle);
3630 drm_free(phys_obj, M_DRM);
3631 dev_priv->mm.phys_objs[id - 1] = NULL;
3634 void i915_gem_free_all_phys_object(struct drm_device *dev)
3638 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3639 i915_gem_free_phys_object(dev, i);
3642 void i915_gem_detach_phys_object(struct drm_device *dev,
3643 struct drm_i915_gem_object *obj)
3645 struct vm_object *mapping = obj->base.vm_obj;
3652 vaddr = obj->phys_obj->handle->vaddr;
3654 page_count = obj->base.size / PAGE_SIZE;
3655 VM_OBJECT_LOCK(obj->base.vm_obj);
3656 for (i = 0; i < page_count; i++) {
3657 struct vm_page *page = shmem_read_mapping_page(mapping, i);
3658 if (!IS_ERR(page)) {
3659 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3660 char *dst = kmap_atomic(page);
3661 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
3664 drm_clflush_pages(&page, 1);
3667 set_page_dirty(page);
3668 mark_page_accessed(page);
3669 page_cache_release(page);
3671 VM_OBJECT_LOCK(obj->base.vm_obj);
3672 vm_page_reference(page);
3673 vm_page_dirty(page);
3674 vm_page_busy_wait(page, FALSE, "i915gem");
3675 vm_page_unwire(page, 0);
3676 vm_page_wakeup(page);
3679 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3680 intel_gtt_chipset_flush();
3682 obj->phys_obj->cur_obj = NULL;
3683 obj->phys_obj = NULL;
3687 i915_gem_attach_phys_object(struct drm_device *dev,
3688 struct drm_i915_gem_object *obj,
3692 struct vm_object *mapping = obj->base.vm_obj;
3693 drm_i915_private_t *dev_priv = dev->dev_private;
3698 if (id > I915_MAX_PHYS_OBJECT)
3701 if (obj->phys_obj) {
3702 if (obj->phys_obj->id == id)
3704 i915_gem_detach_phys_object(dev, obj);
3707 /* create a new object */
3708 if (!dev_priv->mm.phys_objs[id - 1]) {
3709 ret = i915_gem_init_phys_object(dev, id,
3710 obj->base.size, align);
3712 DRM_ERROR("failed to init phys object %d size: %zu\n",
3713 id, obj->base.size);
3718 /* bind to the object */
3719 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3720 obj->phys_obj->cur_obj = obj;
3722 page_count = obj->base.size / PAGE_SIZE;
3724 VM_OBJECT_LOCK(obj->base.vm_obj);
3725 for (i = 0; i < page_count; i++) {
3726 struct vm_page *page;
3729 page = shmem_read_mapping_page(mapping, i);
3730 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3732 return PTR_ERR(page);
3734 src = kmap_atomic(page);
3735 dst = (char*)obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3736 memcpy(dst, src, PAGE_SIZE);
3740 mark_page_accessed(page);
3741 page_cache_release(page);
3743 VM_OBJECT_LOCK(obj->base.vm_obj);
3744 vm_page_reference(page);
3745 vm_page_busy_wait(page, FALSE, "i915gem");
3746 vm_page_unwire(page, 0);
3747 vm_page_wakeup(page);
3749 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3755 i915_gem_phys_pwrite(struct drm_device *dev,
3756 struct drm_i915_gem_object *obj,
3757 struct drm_i915_gem_pwrite *args,
3758 struct drm_file *file_priv)
3760 void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
3761 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
3763 if (copyin_nofault(user_data, vaddr, args->size) != 0) {
3764 unsigned long unwritten;
3766 /* The physical object once assigned is fixed for the lifetime
3767 * of the obj, so we can safely drop the lock and continue to access vaddr.
3771 unwritten = copy_from_user(vaddr, user_data, args->size);
3777 i915_gem_chipset_flush(dev);
3781 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
3783 struct drm_i915_file_private *file_priv = file->driver_priv;
3785 /* Clean up our request list when the client is going away, so that
3786 * later retire_requests won't dereference our soon-to-be-gone file_priv.
3789 spin_lock(&file_priv->mm.lock);
3790 while (!list_empty(&file_priv->mm.request_list)) {
3791 struct drm_i915_gem_request *request;
3793 request = list_first_entry(&file_priv->mm.request_list,
3794 struct drm_i915_gem_request,
3796 list_del(&request->client_list);
3797 request->file_priv = NULL;
3799 spin_unlock(&file_priv->mm.lock);
3803 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
3804 vm_ooffset_t foff, struct ucred *cred, u_short *color)
3807 *color = 0; /* XXXKIB */
3814 i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
3817 struct drm_gem_object *gem_obj;
3818 struct drm_i915_gem_object *obj;
3819 struct drm_device *dev;
3820 drm_i915_private_t *dev_priv;
3825 gem_obj = vm_obj->handle;
3826 obj = to_intel_bo(gem_obj);
3827 dev = obj->base.dev;
3828 dev_priv = dev->dev_private;
3830 write = (prot & VM_PROT_WRITE) != 0;
3834 vm_object_pip_add(vm_obj, 1);
3837 * Remove the placeholder page inserted by vm_fault() from the
3838 * object before dropping the object lock. If
3839 * i915_gem_release_mmap() is active in parallel on this gem
3840 * object, then it owns the drm device sx and might find the
3841 * placeholder already. Then, since the page is busy,
3842 * i915_gem_release_mmap() sleeps waiting for the busy state
3843 * of the page cleared. We will not be able to acquire the drm
3844 * device lock until i915_gem_release_mmap() is able to make progress.
3847 if (*mres != NULL) {
3849 vm_page_remove(oldm);
3854 VM_OBJECT_UNLOCK(vm_obj);
3860 ret = i915_mutex_lock_interruptible(dev);
3869 * Since the object lock was dropped, another thread might have
3870 * faulted on the same GTT address and instantiated the
3871 * mapping for the page. Recheck.
3873 VM_OBJECT_LOCK(vm_obj);
3874 m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
3876 if ((m->flags & PG_BUSY) != 0) {
3879 vm_page_sleep(m, "915pee");
3885 VM_OBJECT_UNLOCK(vm_obj);
3887 /* Now bind it into the GTT if needed */
3888 if (!obj->map_and_fenceable) {
3889 ret = i915_gem_object_unbind(obj);
3895 if (!obj->gtt_space) {
3896 ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
3902 ret = i915_gem_object_set_to_gtt_domain(obj, write);
3909 if (obj->tiling_mode == I915_TILING_NONE)
3910 ret = i915_gem_object_put_fence(obj);
3912 ret = i915_gem_object_get_fence(obj);
3918 if (i915_gem_object_is_inactive(obj))
3919 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3921 obj->fault_mappable = true;
3922 VM_OBJECT_LOCK(vm_obj);
3923 m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
3930 KASSERT((m->flags & PG_FICTITIOUS) != 0,
3931 ("not fictitious %p", m));
3932 KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
3934 if ((m->flags & PG_BUSY) != 0) {
3937 vm_page_sleep(m, "915pbs");
3941 m->valid = VM_PAGE_BITS_ALL;
3942 vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
3945 vm_page_busy_try(m, false);
3951 vm_object_pip_wakeup(vm_obj);
3952 return (VM_PAGER_OK);
3957 KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
3958 if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
3959 goto unlocked_vmobj;
3961 VM_OBJECT_LOCK(vm_obj);
3962 vm_object_pip_wakeup(vm_obj);
3963 return (VM_PAGER_ERROR);
3967 i915_gem_pager_dtor(void *handle)
3969 struct drm_gem_object *obj;
3970 struct drm_device *dev;
3976 drm_gem_free_mmap_offset(obj);
3977 i915_gem_release_mmap(to_intel_bo(obj));
3978 drm_gem_object_unreference(obj);
3982 struct cdev_pager_ops i915_gem_pager_ops = {
3983 .cdev_pg_fault = i915_gem_pager_fault,
3984 .cdev_pg_ctor = i915_gem_pager_ctor,
3985 .cdev_pg_dtor = i915_gem_pager_dtor
3988 #define GEM_PARANOID_CHECK_GTT 0
3989 #if GEM_PARANOID_CHECK_GTT
3991 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
3994 struct drm_i915_private *dev_priv;
3996 unsigned long start, end;
4000 dev_priv = dev->dev_private;
4001 start = OFF_TO_IDX(dev_priv->mm.gtt_start);
4002 end = OFF_TO_IDX(dev_priv->mm.gtt_end);
4003 for (i = start; i < end; i++) {
4004 pa = intel_gtt_read_pte_paddr(i);
4005 for (j = 0; j < page_count; j++) {
4006 if (pa == VM_PAGE_TO_PHYS(ma[j])) {
4007 panic("Page %p in GTT pte index %d pte %x",
4008 ma[j], i, intel_gtt_read_pte(i));
4016 i915_gpu_is_active(struct drm_device *dev)
4018 drm_i915_private_t *dev_priv = dev->dev_private;
4020 return !list_empty(&dev_priv->mm.active_list);
4024 i915_gem_lowmem(void *arg)
4026 struct drm_device *dev;
4027 struct drm_i915_private *dev_priv;
4028 struct drm_i915_gem_object *obj, *next;
4029 int cnt, cnt_fail, cnt_total;
4032 dev_priv = dev->dev_private;
4034 if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT))
4038 /* first scan for clean buffers */
4039 i915_gem_retire_requests(dev);
4041 cnt_total = cnt_fail = cnt = 0;
4043 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4045 if (i915_gem_object_is_purgeable(obj)) {
4046 if (i915_gem_object_unbind(obj) != 0)
4052 /* second pass, evict/count anything still on the inactive list */
4053 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4055 if (i915_gem_object_unbind(obj) == 0)
4061 if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) {
4063 * We are desperate for pages, so as a last resort, wait
4064 * for the GPU to finish and discard whatever we can.
4065 * This dramatically reduces the number of
4066 * OOM-killer events whilst running the GPU aggressively.
4068 if (i915_gpu_idle(dev) == 0)
4075 i915_gem_unload(struct drm_device *dev)
4077 struct drm_i915_private *dev_priv;
4079 dev_priv = dev->dev_private;
4080 EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);
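/*
 * The reclaim strategy of the low-memory handler (i915_gem_lowmem) above,
 * written out as data so it can be printed from a test harness; the step
 * descriptions are this sketch's own wording.
 */
#if 0	/* illustrative sketch, not part of the driver build */
static const char *const lowmem_passes[] = {
	"retire completed requests to surface clean buffers",
	"pass 1: unbind inactive buffers already marked purgeable",
	"pass 2: unbind anything still left on the inactive list",
	"if over 1% of attempts failed and the GPU is active: i915_gpu_idle() and rescan",
};
#endif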