2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
26 * Copyright (c) 2011 The FreeBSD Foundation
27 * All rights reserved.
29 * This software was developed by Konstantin Belousov under sponsorship from
30 * the FreeBSD Foundation.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 #include <sys/resourcevar.h>
56 #include <sys/sfbuf.h>
59 #include <drm/i915_drm.h>
61 #include "intel_drv.h"
62 #include "intel_ringbuffer.h"
63 #include <linux/completion.h>
64 #include <linux/jiffies.h>
65 #include <linux/time.h>
67 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
68 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
69 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
71 bool map_and_fenceable,
73 static int i915_gem_phys_pwrite(struct drm_device *dev,
74 struct drm_i915_gem_object *obj,
75 struct drm_i915_gem_pwrite *args,
76 struct drm_file *file);
78 static void i915_gem_write_fence(struct drm_device *dev, int reg,
79 struct drm_i915_gem_object *obj);
80 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
81 struct drm_i915_fence_reg *fence,
84 static uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size,
86 static uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev,
87 uint32_t size, int tiling_mode);
88 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
90 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj);
91 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
93 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
96 i915_gem_release_mmap(obj);
98 /* As we do not have an associated fence register, we will force
99 * a tiling change if we ever need to acquire one.
101 obj->fence_dirty = false;
102 obj->fence_reg = I915_FENCE_REG_NONE;
105 static int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj);
106 static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj);
107 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj);
108 static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex);
109 static void i915_gem_reset_fences(struct drm_device *dev);
110 static void i915_gem_lowmem(void *arg);
112 /* some bookkeeping */
113 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
116 dev_priv->mm.object_count++;
117 dev_priv->mm.object_memory += size;
120 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
123 dev_priv->mm.object_count--;
124 dev_priv->mm.object_memory -= size;
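/*
 * Wait for a pending GPU reset to complete before taking the struct mutex.
 * The wait is capped at 10 seconds; if the device is still wedged afterwards,
 * the consumed completion token is handed back so later waiters do not block
 * on a count that never reaches zero.
 */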
128 i915_gem_wait_for_error(struct drm_device *dev)
130 struct drm_i915_private *dev_priv = dev->dev_private;
131 struct completion *x = &dev_priv->error_completion;
134 if (!atomic_read(&dev_priv->mm.wedged))
138 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
139 * userspace. If it takes that long something really bad is going on and
140 * we should simply try to bail out and fail as gracefully as possible.
142 ret = wait_for_completion_interruptible_timeout(x, 10*hz);
144 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
146 } else if (ret < 0) {
150 if (atomic_read(&dev_priv->mm.wedged)) {
151 /* GPU is hung, bump the completion count to account for
152 * the token we just consumed so that we never hit zero and
153 * end up waiting upon a subsequent completion event that
156 spin_lock(&x->wait.lock);
158 spin_unlock(&x->wait.lock);
163 int i915_mutex_lock_interruptible(struct drm_device *dev)
167 ret = i915_gem_wait_for_error(dev);
171 ret = lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL);
175 WARN_ON(i915_verify_lists(dev));
180 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
186 i915_gem_init_ioctl(struct drm_device *dev, void *data,
187 struct drm_file *file)
189 struct drm_i915_gem_init *args = data;
191 if (drm_core_check_feature(dev, DRIVER_MODESET))
194 if (args->gtt_start >= args->gtt_end ||
195 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
198 /* GEM with user mode setting was never supported on ilk and later. */
199 if (INTEL_INFO(dev)->gen >= 5)
202 lockmgr(&dev->dev_lock, LK_EXCLUSIVE|LK_RETRY|LK_CANRECURSE);
203 i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
204 lockmgr(&dev->dev_lock, LK_RELEASE);
210 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
211 struct drm_file *file)
213 struct drm_i915_private *dev_priv = dev->dev_private;
214 struct drm_i915_gem_get_aperture *args = data;
215 struct drm_i915_gem_object *obj;
220 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
222 pinned += obj->gtt_space->size;
225 args->aper_size = dev_priv->mm.gtt_total;
226 args->aper_available_size = args->aper_size - pinned;
232 i915_gem_create(struct drm_file *file,
233 struct drm_device *dev,
237 struct drm_i915_gem_object *obj;
241 size = roundup(size, PAGE_SIZE);
245 /* Allocate the new object */
246 obj = i915_gem_alloc_object(dev, size);
251 ret = drm_gem_handle_create(file, &obj->base, &handle);
253 drm_gem_object_release(&obj->base);
254 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
255 drm_free(obj, M_DRM);
259 /* drop reference from allocate - handle holds it now */
260 drm_gem_object_unreference(&obj->base);
266 i915_gem_dumb_create(struct drm_file *file,
267 struct drm_device *dev,
268 struct drm_mode_create_dumb *args)
271 /* have to work out size/pitch and return them */
272 args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
273 args->size = args->pitch * args->height;
274 return i915_gem_create(file, dev,
275 args->size, &args->handle);
278 int i915_gem_dumb_destroy(struct drm_file *file,
279 struct drm_device *dev,
283 return drm_gem_handle_delete(file, handle);
287 * Creates a new mm object and returns a handle to it.
290 i915_gem_create_ioctl(struct drm_device *dev, void *data,
291 struct drm_file *file)
293 struct drm_i915_gem_create *args = data;
295 return i915_gem_create(file, dev,
296 args->size, &args->handle);
299 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
301 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
303 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
304 obj->tiling_mode != I915_TILING_NONE;
307 static inline void vm_page_reference(vm_page_t m)
309 vm_page_flag_set(m, PG_REFERENCED);
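/*
 * Slow pread path: wire each backing page, map it through an sf_buf and copy
 * it out to userspace with copyout_nofault().  When bit-17 swizzling applies,
 * the copy proceeds in 64-byte chunks with the source offset swizzled so that
 * userspace sees the data unswizzled.
 */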
313 i915_gem_shmem_pread(struct drm_device *dev,
314 struct drm_i915_gem_object *obj,
315 struct drm_i915_gem_pread *args,
316 struct drm_file *file)
323 int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
325 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
328 vm_obj = obj->base.vm_obj;
331 VM_OBJECT_LOCK(vm_obj);
332 vm_object_pip_add(vm_obj, 1);
333 while (args->size > 0) {
334 obj_pi = OFF_TO_IDX(args->offset);
335 obj_po = args->offset & PAGE_MASK;
337 m = i915_gem_wire_page(vm_obj, obj_pi);
338 VM_OBJECT_UNLOCK(vm_obj);
340 sf = sf_buf_alloc(m);
341 mkva = sf_buf_kva(sf);
342 length = min(args->size, PAGE_SIZE - obj_po);
344 if (do_bit17_swizzling &&
345 (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
346 cnt = roundup2(obj_po + 1, 64);
347 cnt = min(cnt - obj_po, length);
348 swizzled_po = obj_po ^ 64;
351 swizzled_po = obj_po;
353 ret = -copyout_nofault(
354 (char *)mkva + swizzled_po,
355 (void *)(uintptr_t)args->data_ptr, cnt);
358 args->data_ptr += cnt;
365 VM_OBJECT_LOCK(vm_obj);
366 vm_page_reference(m);
367 vm_page_busy_wait(m, FALSE, "i915gem");
368 vm_page_unwire(m, 1);
374 vm_object_pip_wakeup(vm_obj);
375 VM_OBJECT_UNLOCK(vm_obj);
381 * Reads data from the object referenced by handle.
383 * On error, the contents of *data are undefined.
386 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
387 struct drm_file *file)
389 struct drm_i915_gem_pread *args = data;
390 struct drm_i915_gem_object *obj;
396 ret = i915_mutex_lock_interruptible(dev);
400 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
401 if (&obj->base == NULL) {
406 /* Bounds check source. */
407 if (args->offset > obj->base.size ||
408 args->size > obj->base.size - args->offset) {
413 ret = i915_gem_shmem_pread(dev, obj, args, file);
415 drm_gem_object_unreference(&obj->base);
422 * This is the fast pwrite path, where we copy the data directly from the
423 * user into the GTT, uncached.
426 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
427 struct drm_i915_gem_object *obj,
428 struct drm_i915_gem_pwrite *args,
429 struct drm_file *file)
435 * Pass the unaligned physical address and size to pmap_mapdev_attr()
436 * so it can properly calculate whether an extra page needs to be
437 * mapped or not to cover the requested range. The function will
438 * add the page offset into the returned mkva for us.
440 mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base + obj->gtt_offset +
441 args->offset, args->size, PAT_WRITE_COMBINING);
442 ret = -copyin_nofault((void *)(uintptr_t)args->data_ptr, (char *)mkva, args->size);
443 pmap_unmapdev(mkva, args->size);
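/*
 * Slow pwrite path, the mirror image of the shmem pread above: wire each
 * page, map it via an sf_buf and copy user data in with copyin_nofault().
 * Note that do_bit17_swizzling is forced to zero on this path, so the
 * swizzled branch below is effectively unused here.
 */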
449 i915_gem_shmem_pwrite(struct drm_device *dev,
450 struct drm_i915_gem_object *obj,
451 struct drm_i915_gem_pwrite *args,
452 struct drm_file *file)
459 int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
461 do_bit17_swizzling = 0;
464 vm_obj = obj->base.vm_obj;
467 VM_OBJECT_LOCK(vm_obj);
468 vm_object_pip_add(vm_obj, 1);
469 while (args->size > 0) {
470 obj_pi = OFF_TO_IDX(args->offset);
471 obj_po = args->offset & PAGE_MASK;
473 m = i915_gem_wire_page(vm_obj, obj_pi);
474 VM_OBJECT_UNLOCK(vm_obj);
476 sf = sf_buf_alloc(m);
477 mkva = sf_buf_kva(sf);
478 length = min(args->size, PAGE_SIZE - obj_po);
480 if (do_bit17_swizzling &&
481 (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
482 cnt = roundup2(obj_po + 1, 64);
483 cnt = min(cnt - obj_po, length);
484 swizzled_po = obj_po ^ 64;
487 swizzled_po = obj_po;
489 ret = -copyin_nofault(
490 (void *)(uintptr_t)args->data_ptr,
491 (char *)mkva + swizzled_po, cnt);
494 args->data_ptr += cnt;
501 VM_OBJECT_LOCK(vm_obj);
503 vm_page_reference(m);
504 vm_page_busy_wait(m, FALSE, "i915gem");
505 vm_page_unwire(m, 1);
511 vm_object_pip_wakeup(vm_obj);
512 VM_OBJECT_UNLOCK(vm_obj);
518 * Writes data to the object referenced by handle.
520 * On error, the contents of the buffer that were to be modified are undefined.
523 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
524 struct drm_file *file)
526 struct drm_i915_gem_pwrite *args = data;
527 struct drm_i915_gem_object *obj;
533 ret = i915_mutex_lock_interruptible(dev);
537 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
538 if (&obj->base == NULL) {
543 /* Bounds check destination. */
544 if (args->offset > obj->base.size ||
545 args->size > obj->base.size - args->offset) {
551 /* We can only do the GTT pwrite on untiled buffers, as otherwise
552 * it would end up going through the fenced access, and we'll get
553 * different detiling behavior between reading and writing.
554 * pread/pwrite currently are reading and writing from the CPU
555 * perspective, requiring manual detiling by the client.
558 ret = i915_gem_phys_pwrite(dev, obj, args, file);
562 if (obj->cache_level == I915_CACHE_NONE &&
563 obj->tiling_mode == I915_TILING_NONE &&
564 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
565 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
566 /* Note that the gtt paths might fail with non-page-backed user
567 * pointers (e.g. gtt mappings when moving data between
568 * textures). Fall back to the shmem path in that case. */
571 if (ret == -EFAULT || ret == -ENOSPC)
572 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
575 drm_gem_object_unreference(&obj->base);
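/*
 * Check whether the GPU is wedged.  If error recovery has already finished
 * and the GPU is still wedged, the reset failed and a hard error is reported;
 * otherwise interruptible callers get a chance to retry once recovery
 * completes.
 */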
582 i915_gem_check_wedge(struct drm_i915_private *dev_priv,
585 if (atomic_read(&dev_priv->mm.wedged)) {
586 struct completion *x = &dev_priv->error_completion;
587 bool recovery_complete;
589 /* Give the error handler a chance to run. */
590 spin_lock(&x->wait.lock);
591 recovery_complete = x->done > 0;
592 spin_unlock(&x->wait.lock);
594 /* Non-interruptible callers can't handle -EAGAIN, hence return
595 * -EIO unconditionally for these. */
599 /* Recovery complete, but still wedged means reset failure. */
600 if (recovery_complete)
610 * Compare seqno against outstanding lazy request. Emit a request if they are
614 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
618 DRM_LOCK_ASSERT(ring->dev);
621 if (seqno == ring->outstanding_lazy_request)
622 ret = i915_add_request(ring, NULL, NULL);
628 * __wait_seqno - wait until execution of seqno has finished
629 * @ring: the ring expected to report seqno
631 * @interruptible: do an interruptible wait (normally yes)
632 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
634 * Returns 0 if the seqno was found within the allotted time. Else returns the
635 * errno with remaining time filled in timeout argument.
637 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
638 bool interruptible, struct timespec *timeout)
640 drm_i915_private_t *dev_priv = ring->dev->dev_private;
641 struct timespec before, now, wait_time={1,0};
642 unsigned long timeout_jiffies;
644 bool wait_forever = true;
647 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
650 if (timeout != NULL) {
651 wait_time = *timeout;
652 wait_forever = false;
655 timeout_jiffies = timespec_to_jiffies(&wait_time);
657 if (WARN_ON(!ring->irq_get(ring)))
660 /* Record current time in case interrupted by signal, or wedged */
661 getrawmonotonic(&before);
664 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
665 atomic_read(&dev_priv->mm.wedged))
668 end = wait_event_interruptible_timeout(ring->irq_queue,
672 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
675 ret = i915_gem_check_wedge(dev_priv, interruptible);
678 } while (end == 0 && wait_forever);
680 getrawmonotonic(&now);
686 struct timespec sleep_time = timespec_sub(now, before);
687 *timeout = timespec_sub(*timeout, sleep_time);
692 case -EAGAIN: /* Wedged */
693 case -ERESTARTSYS: /* Signal */
695 case 0: /* Timeout */
697 set_normalized_timespec(timeout, 0, 0);
698 return -ETIMEDOUT; /* -ETIME on Linux */
699 default: /* Completed */
700 WARN_ON(end < 0); /* We're not aware of other errors */
706 * Waits for a sequence number to be signaled, and cleans up the
707 * request and object lists appropriately for that event.
710 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
712 struct drm_device *dev = ring->dev;
713 struct drm_i915_private *dev_priv = dev->dev_private;
716 DRM_LOCK_ASSERT(dev);
719 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
723 ret = i915_gem_check_olr(ring, seqno);
727 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
733 * Ensures that all rendering to the object has completed and the object is
734 * safe to unbind from the GTT or access from the CPU.
736 static __must_check int
737 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
740 struct intel_ring_buffer *ring = obj->ring;
744 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
748 ret = i915_wait_seqno(ring, seqno);
752 i915_gem_retire_requests_ring(ring);
754 /* Manually manage the write flush as we may have not yet
755 * retired the buffer.
757 if (obj->last_write_seqno &&
758 i915_seqno_passed(seqno, obj->last_write_seqno)) {
759 obj->last_write_seqno = 0;
760 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
766 /* A nonblocking variant of the above wait. This is a highly dangerous routine
767 * as the object state may change during this call.
769 static __must_check int
770 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
773 struct drm_device *dev = obj->base.dev;
774 struct drm_i915_private *dev_priv = dev->dev_private;
775 struct intel_ring_buffer *ring = obj->ring;
779 DRM_LOCK_ASSERT(dev);
780 BUG_ON(!dev_priv->mm.interruptible);
782 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
786 ret = i915_gem_check_wedge(dev_priv, true);
790 ret = i915_gem_check_olr(ring, seqno);
795 ret = __wait_seqno(ring, seqno, true, NULL);
798 i915_gem_retire_requests_ring(ring);
800 /* Manually manage the write flush as we may have not yet
801 * retired the buffer.
803 if (obj->last_write_seqno &&
804 i915_seqno_passed(seqno, obj->last_write_seqno)) {
805 obj->last_write_seqno = 0;
806 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
813 * Called when user space prepares to use an object with the CPU, either
814 * through the mmap ioctl's mapping or a GTT mapping.
817 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
818 struct drm_file *file)
820 struct drm_i915_gem_set_domain *args = data;
821 struct drm_i915_gem_object *obj;
822 uint32_t read_domains = args->read_domains;
823 uint32_t write_domain = args->write_domain;
826 /* Only handle setting domains to types used by the CPU. */
827 if (write_domain & I915_GEM_GPU_DOMAINS)
830 if (read_domains & I915_GEM_GPU_DOMAINS)
833 /* Having something in the write domain implies it's in the read
834 * domain, and only that read domain. Enforce that in the request.
836 if (write_domain != 0 && read_domains != write_domain)
839 ret = i915_mutex_lock_interruptible(dev);
843 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
844 if (&obj->base == NULL) {
849 /* Try to flush the object off the GPU without holding the lock.
850 * We will repeat the flush holding the lock in the normal manner
851 * to catch cases where we are gazumped.
853 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
857 if (read_domains & I915_GEM_DOMAIN_GTT) {
858 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
860 /* Silently promote "you're not bound, there was nothing to do"
861 * to success, since the client was just asking us to
862 * make sure everything was done.
867 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
871 drm_gem_object_unreference(&obj->base);
878 * Called when user space has done writes to this buffer
881 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
882 struct drm_file *file)
884 struct drm_i915_gem_sw_finish *args = data;
885 struct drm_i915_gem_object *obj;
888 ret = i915_mutex_lock_interruptible(dev);
891 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
892 if (&obj->base == NULL) {
897 /* Pinned buffers may be scanout, so flush the cache */
899 i915_gem_object_flush_cpu_write_domain(obj);
901 drm_gem_object_unreference(&obj->base);
908 * Maps the contents of an object, returning the address it is mapped
911 * While the mapping holds a reference on the contents of the object, it doesn't
912 * imply a ref on the object itself.
915 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
916 struct drm_file *file)
918 struct drm_i915_gem_mmap *args = data;
919 struct drm_gem_object *obj;
920 struct proc *p = curproc;
921 vm_map_t map = &p->p_vmspace->vm_map;
926 obj = drm_gem_object_lookup(dev, file, args->handle);
933 size = round_page(args->size);
934 if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
940 vm_object_hold(obj->vm_obj);
941 vm_object_reference_locked(obj->vm_obj);
942 vm_object_drop(obj->vm_obj);
943 rv = vm_map_find(map, obj->vm_obj, NULL,
944 args->offset, &addr, args->size,
945 PAGE_SIZE, /* align */
947 VM_MAPTYPE_NORMAL, /* maptype */
948 VM_PROT_READ | VM_PROT_WRITE, /* prot */
949 VM_PROT_READ | VM_PROT_WRITE, /* max */
950 MAP_SHARED /* cow */);
951 if (rv != KERN_SUCCESS) {
952 vm_object_deallocate(obj->vm_obj);
953 error = -vm_mmap_to_errno(rv);
955 args->addr_ptr = (uint64_t)addr;
958 drm_gem_object_unreference(obj);
963 * i915_gem_release_mmap - remove physical page mappings
964 * @obj: obj in question
966 * Preserve the reservation of the mmapping with the DRM core code, but
967 * relinquish ownership of the pages back to the system.
969 * It is vital that we remove the page mapping if we have mapped a tiled
970 * object through the GTT and then lose the fence register due to
971 * resource pressure. Similarly if the object has been moved out of the
972 * aperture, then pages mapped into userspace must be revoked. Removing the
973 * mapping will then trigger a page fault on the next user access, allowing
974 * fixup by i915_gem_fault().
977 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
983 if (!obj->fault_mappable)
986 devobj = cdev_pager_lookup(obj);
987 if (devobj != NULL) {
988 page_count = OFF_TO_IDX(obj->base.size);
990 VM_OBJECT_LOCK(devobj);
991 for (i = 0; i < page_count; i++) {
992 m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm");
995 cdev_pager_free_page(devobj, m);
997 VM_OBJECT_UNLOCK(devobj);
998 vm_object_deallocate(devobj);
1001 obj->fault_mappable = false;
1005 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1009 if (INTEL_INFO(dev)->gen >= 4 ||
1010 tiling_mode == I915_TILING_NONE)
1013 /* Previous chips need a power-of-two fence region when tiling */
1014 if (INTEL_INFO(dev)->gen == 3)
1015 gtt_size = 1024*1024;
1017 gtt_size = 512*1024;
1019 while (gtt_size < size)
1026 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1027 * @obj: object to check
1029 * Return the required GTT alignment for an object, taking into account
1030 * potential fence register mapping.
1033 i915_gem_get_gtt_alignment(struct drm_device *dev,
1039 * Minimum alignment is 4k (GTT page size), but might be greater
1040 * if a fence register is needed for the object.
1042 if (INTEL_INFO(dev)->gen >= 4 ||
1043 tiling_mode == I915_TILING_NONE)
1047 * Previous chips need to be aligned to the size of the smallest
1048 * fence register that can contain the object.
1050 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1054 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1057 * @size: size of the object
1058 * @tiling_mode: tiling mode of the object
1060 * Return the required GTT alignment for an object, only taking into account
1061 * unfenced tiled surface requirements.
1064 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1069 * Minimum alignment is 4k (GTT page size) for sane hw.
1071 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1072 tiling_mode == I915_TILING_NONE)
1075 /* Previous hardware however needs to be aligned to a power-of-two
1076 * tile height. The simplest method for determining this is to reuse
1077 * the power-of-tile object size.
1079 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1083 i915_gem_mmap_gtt(struct drm_file *file,
1084 struct drm_device *dev,
1088 struct drm_i915_private *dev_priv = dev->dev_private;
1089 struct drm_i915_gem_object *obj;
1092 ret = i915_mutex_lock_interruptible(dev);
1096 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1097 if (&obj->base == NULL) {
1102 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1107 if (obj->madv != I915_MADV_WILLNEED) {
1108 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1113 ret = drm_gem_create_mmap_offset(&obj->base);
1117 *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
1118 DRM_GEM_MAPPING_KEY;
1120 drm_gem_object_unreference(&obj->base);
1127 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1129 * @data: GTT mapping ioctl data
1130 * @file: GEM object info
1132 * Simply returns the fake offset to userspace so it can mmap it.
1133 * The mmap call will end up in drm_gem_mmap(), which will set things
1134 * up so we can get faults in the handler above.
1136 * The fault handler will take care of binding the object into the GTT
1137 * (since it may have been evicted to make room for something), allocating
1138 * a fence register, and mapping the appropriate aperture address into
1142 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1143 struct drm_file *file)
1145 struct drm_i915_gem_mmap_gtt *args = data;
1147 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
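/*
 * Illustrative userspace sequence for the GTT mmap ioctl above (a hedged
 * sketch, not part of this file; "fd", "handle" and "size" are assumed to
 * come from the caller's own GEM setup):
 *
 *	struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mg.offset);
 */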
1150 /* Immediately discard the backing storage */
1152 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1156 vm_obj = obj->base.vm_obj;
1157 VM_OBJECT_LOCK(vm_obj);
1158 vm_object_page_remove(vm_obj, 0, 0, false);
1159 VM_OBJECT_UNLOCK(vm_obj);
1160 obj->madv = __I915_MADV_PURGED;
1164 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1166 return obj->madv == I915_MADV_DONTNEED;
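/*
 * Release the wired pages backing an object: preserve the bit-17 swizzle
 * state for tiled buffers, keep a reference hint on pages we still want,
 * then unwire each page and free the page array.
 */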
1170 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1175 BUG_ON(obj->madv == __I915_MADV_PURGED);
1177 if (obj->tiling_mode != I915_TILING_NONE)
1178 i915_gem_object_save_bit_17_swizzle(obj);
1179 if (obj->madv == I915_MADV_DONTNEED)
1181 page_count = obj->base.size / PAGE_SIZE;
1182 VM_OBJECT_LOCK(obj->base.vm_obj);
1183 #if GEM_PARANOID_CHECK_GTT
1184 i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
1186 for (i = 0; i < page_count; i++) {
1190 if (obj->madv == I915_MADV_WILLNEED)
1191 vm_page_reference(m);
1192 vm_page_busy_wait(obj->pages[i], FALSE, "i915gem");
1193 vm_page_unwire(obj->pages[i], 1);
1194 vm_page_wakeup(obj->pages[i]);
1196 VM_OBJECT_UNLOCK(obj->base.vm_obj);
1198 drm_free(obj->pages, M_DRM);
1203 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1206 struct drm_device *dev;
1209 int page_count, i, j;
1211 dev = obj->base.dev;
1212 KASSERT(obj->pages == NULL, ("Obj already has pages"));
1213 page_count = obj->base.size / PAGE_SIZE;
1214 obj->pages = kmalloc(page_count * sizeof(vm_page_t), M_DRM,
1216 vm_obj = obj->base.vm_obj;
1217 VM_OBJECT_LOCK(vm_obj);
1218 for (i = 0; i < page_count; i++) {
1219 if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL)
1222 VM_OBJECT_UNLOCK(vm_obj);
1223 if (i915_gem_object_needs_bit17_swizzle(obj))
1224 i915_gem_object_do_bit_17_swizzle(obj);
1228 for (j = 0; j < i; j++) {
1230 vm_page_busy_wait(m, FALSE, "i915gem");
1231 vm_page_unwire(m, 0);
1234 VM_OBJECT_UNLOCK(vm_obj);
1235 drm_free(obj->pages, M_DRM);
1241 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1242 struct intel_ring_buffer *ring)
1244 struct drm_device *dev = obj->base.dev;
1245 struct drm_i915_private *dev_priv = dev->dev_private;
1246 u32 seqno = intel_ring_get_seqno(ring);
1248 BUG_ON(ring == NULL);
1251 /* Add a reference if we're newly entering the active list. */
1253 drm_gem_object_reference(&obj->base);
1257 /* Move from whatever list we were on to the tail of execution. */
1258 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1259 list_move_tail(&obj->ring_list, &ring->active_list);
1261 obj->last_read_seqno = seqno;
1263 if (obj->fenced_gpu_access) {
1264 obj->last_fenced_seqno = seqno;
1266 /* Bump MRU to take account of the delayed flush */
1267 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1268 struct drm_i915_fence_reg *reg;
1270 reg = &dev_priv->fence_regs[obj->fence_reg];
1271 list_move_tail(&reg->lru_list,
1272 &dev_priv->mm.fence_list);
1278 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1280 struct drm_device *dev = obj->base.dev;
1281 struct drm_i915_private *dev_priv = dev->dev_private;
1283 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1284 BUG_ON(!obj->active);
1286 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1288 list_del_init(&obj->ring_list);
1291 obj->last_read_seqno = 0;
1292 obj->last_write_seqno = 0;
1293 obj->base.write_domain = 0;
1295 obj->last_fenced_seqno = 0;
1296 obj->fenced_gpu_access = false;
1299 drm_gem_object_unreference(&obj->base);
1301 WARN_ON(i915_verify_lists(dev));
1305 i915_gem_handle_seqno_wrap(struct drm_device *dev)
1307 struct drm_i915_private *dev_priv = dev->dev_private;
1308 struct intel_ring_buffer *ring;
1311 /* The hardware uses various monotonic 32-bit counters; if we
1312 * detect that they will wrap around we need to idle the GPU
1313 * and reset those counters.
1316 for_each_ring(ring, dev_priv, i) {
1317 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1318 ret |= ring->sync_seqno[j] != 0;
1323 ret = i915_gpu_idle(dev);
1327 i915_gem_retire_requests(dev);
1328 for_each_ring(ring, dev_priv, i) {
1329 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1330 ring->sync_seqno[j] = 0;
1337 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1339 struct drm_i915_private *dev_priv = dev->dev_private;
1341 /* reserve 0 for non-seqno */
1342 if (dev_priv->next_seqno == 0) {
1343 int ret = i915_gem_handle_seqno_wrap(dev);
1347 dev_priv->next_seqno = 1;
1350 *seqno = dev_priv->next_seqno++;
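/*
 * Emit a request on the ring.  Any outstanding cache flushes are emitted
 * first, the current ring tail is recorded so the head position is never
 * over-estimated, and the hangcheck timer and retire work are kicked off
 * once the request has been queued.
 */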
1355 i915_add_request(struct intel_ring_buffer *ring,
1356 struct drm_file *file,
1359 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1360 struct drm_i915_gem_request *request;
1361 u32 request_ring_position;
1366 * Emit any outstanding flushes - execbuf can fail to emit the flush
1367 * after having emitted the batchbuffer command. Hence we need to fix
1368 * things up similar to emitting the lazy request. The difference here
1369 * is that the flush _must_ happen before the next request, no matter
1372 ret = intel_ring_flush_all_caches(ring);
1376 request = kmalloc(sizeof(*request), M_DRM, M_WAITOK | M_ZERO);
1377 if (request == NULL)
1381 /* Record the position of the start of the request so that
1382 * should we detect the updated seqno part-way through the
1383 * GPU processing the request, we never over-estimate the
1384 * position of the head.
1386 request_ring_position = intel_ring_get_tail(ring);
1388 ret = ring->add_request(ring);
1390 kfree(request, M_DRM);
1394 request->seqno = intel_ring_get_seqno(ring);
1395 request->ring = ring;
1396 request->tail = request_ring_position;
1397 request->emitted_jiffies = jiffies;
1398 was_empty = list_empty(&ring->request_list);
1399 list_add_tail(&request->list, &ring->request_list);
1400 request->file_priv = NULL;
1403 struct drm_i915_file_private *file_priv = file->driver_priv;
1405 spin_lock(&file_priv->mm.lock);
1406 request->file_priv = file_priv;
1407 list_add_tail(&request->client_list,
1408 &file_priv->mm.request_list);
1409 spin_unlock(&file_priv->mm.lock);
1412 ring->outstanding_lazy_request = 0;
1414 if (!dev_priv->mm.suspended) {
1415 if (i915_enable_hangcheck) {
1416 mod_timer(&dev_priv->hangcheck_timer,
1417 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1420 queue_delayed_work(dev_priv->wq,
1421 &dev_priv->mm.retire_work,
1422 round_jiffies_up_relative(hz));
1423 intel_mark_busy(dev_priv->dev);
1428 *out_seqno = request->seqno;
1433 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1435 struct drm_i915_file_private *file_priv = request->file_priv;
1440 spin_lock(&file_priv->mm.lock);
1441 if (request->file_priv) {
1442 list_del(&request->client_list);
1443 request->file_priv = NULL;
1445 spin_unlock(&file_priv->mm.lock);
1448 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1449 struct intel_ring_buffer *ring)
1451 while (!list_empty(&ring->request_list)) {
1452 struct drm_i915_gem_request *request;
1454 request = list_first_entry(&ring->request_list,
1455 struct drm_i915_gem_request,
1458 list_del(&request->list);
1459 i915_gem_request_remove_from_client(request);
1460 drm_free(request, M_DRM);
1463 while (!list_empty(&ring->active_list)) {
1464 struct drm_i915_gem_object *obj;
1466 obj = list_first_entry(&ring->active_list,
1467 struct drm_i915_gem_object,
1470 i915_gem_object_move_to_inactive(obj);
1474 static void i915_gem_reset_fences(struct drm_device *dev)
1476 struct drm_i915_private *dev_priv = dev->dev_private;
1479 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1480 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1482 i915_gem_write_fence(dev, i, NULL);
1485 i915_gem_object_fence_lost(reg->obj);
1489 INIT_LIST_HEAD(&reg->lru_list);
1492 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1495 void i915_gem_reset(struct drm_device *dev)
1497 struct drm_i915_private *dev_priv = dev->dev_private;
1498 struct drm_i915_gem_object *obj;
1499 struct intel_ring_buffer *ring;
1502 for_each_ring(ring, dev_priv, i)
1503 i915_gem_reset_ring_lists(dev_priv, ring);
1505 /* Move everything out of the GPU domains to ensure we do any
1506 * necessary invalidation upon reuse.
1508 list_for_each_entry(obj,
1509 &dev_priv->mm.inactive_list,
1512 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1515 /* The fence registers are invalidated so clear them out */
1516 i915_gem_reset_fences(dev);
1520 * This function clears the request list as sequence numbers are passed.
1523 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1527 if (list_empty(&ring->request_list))
1530 WARN_ON(i915_verify_lists(ring->dev));
1532 seqno = ring->get_seqno(ring, true);
1534 while (!list_empty(&ring->request_list)) {
1535 struct drm_i915_gem_request *request;
1537 request = list_first_entry(&ring->request_list,
1538 struct drm_i915_gem_request,
1541 if (!i915_seqno_passed(seqno, request->seqno))
1544 /* We know the GPU must have read the request to have
1545 * sent us the seqno + interrupt, so use the position
1546 * of tail of the request to update the last known position
1549 ring->last_retired_head = request->tail;
1551 list_del(&request->list);
1552 i915_gem_request_remove_from_client(request);
1553 kfree(request, M_DRM);
1556 /* Move any buffers on the active list that are no longer referenced
1557 * by the ringbuffer to the flushing/inactive lists as appropriate.
1559 while (!list_empty(&ring->active_list)) {
1560 struct drm_i915_gem_object *obj;
1562 obj = list_first_entry(&ring->active_list,
1563 struct drm_i915_gem_object,
1566 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
1569 i915_gem_object_move_to_inactive(obj);
1572 if (unlikely(ring->trace_irq_seqno &&
1573 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1574 ring->irq_put(ring);
1575 ring->trace_irq_seqno = 0;
1581 i915_gem_retire_requests(struct drm_device *dev)
1583 drm_i915_private_t *dev_priv = dev->dev_private;
1584 struct intel_ring_buffer *ring;
1587 for_each_ring(ring, dev_priv, i)
1588 i915_gem_retire_requests_ring(ring);
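/*
 * Periodic housekeeping: retire completed requests, flush any rings with
 * dirty GPU caches so objects are not held indefinitely, and re-arm the
 * work while the device remains busy.  If the struct mutex is contended,
 * the work simply re-queues itself and tries again later.
 */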
1592 i915_gem_retire_work_handler(struct work_struct *work)
1594 drm_i915_private_t *dev_priv;
1595 struct drm_device *dev;
1596 struct intel_ring_buffer *ring;
1600 dev_priv = container_of(work, drm_i915_private_t,
1601 mm.retire_work.work);
1602 dev = dev_priv->dev;
1604 /* Come back later if the device is busy... */
1605 if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT)) {
1606 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
1607 round_jiffies_up_relative(hz));
1611 i915_gem_retire_requests(dev);
1613 /* Send a periodic flush down the ring so we don't hold onto GEM
1614 * objects indefinitely.
1617 for_each_ring(ring, dev_priv, i) {
1618 if (ring->gpu_caches_dirty)
1619 i915_add_request(ring, NULL, NULL);
1621 idle &= list_empty(&ring->request_list);
1624 if (!dev_priv->mm.suspended && !idle)
1625 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
1626 round_jiffies_up_relative(hz));
1628 intel_mark_idle(dev);
1633 * Ensures that an object will eventually get non-busy by flushing any required
1634 * write domains, emitting any outstanding lazy request and retiring any
1635 * completed requests.
1638 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
1643 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
1647 i915_gem_retire_requests_ring(obj->ring);
1654 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
1655 * @DRM_IOCTL_ARGS: standard ioctl arguments
1657 * Returns 0 if successful, else an error is returned with the remaining time in
1658 * the timeout parameter.
1659 * -ETIME: object is still busy after timeout
1660 * -ERESTARTSYS: signal interrupted the wait
1661 * -ENOENT: object doesn't exist
1662 * Also possible, but rare:
1663 * -EAGAIN: GPU wedged
1665 * -ENODEV: Internal IRQ fail
1666 * -E?: The add request failed
1668 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
1669 * non-zero timeout parameter the wait ioctl will wait for the given number of
1670 * nanoseconds on an object becoming unbusy. Since the wait itself does so
1671 * without holding struct_mutex the object may become re-busied before this
1672 * function completes. A similar but shorter race condition exists in the busy
1676 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
1678 struct drm_i915_gem_wait *args = data;
1679 struct drm_i915_gem_object *obj;
1680 struct intel_ring_buffer *ring = NULL;
1681 struct timespec timeout_stack, *timeout = NULL;
1685 if (args->timeout_ns >= 0) {
1686 timeout_stack = ns_to_timespec(args->timeout_ns);
1687 timeout = &timeout_stack;
1690 ret = i915_mutex_lock_interruptible(dev);
1694 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
1695 if (&obj->base == NULL) {
1700 /* Need to make sure the object gets inactive eventually. */
1701 ret = i915_gem_object_flush_active(obj);
1706 seqno = obj->last_read_seqno;
1713 /* Do this after OLR check to make sure we make forward progress polling
1714 * on this IOCTL with a 0 timeout (like busy ioctl)
1716 if (!args->timeout_ns) {
1721 drm_gem_object_unreference(&obj->base);
1724 ret = __wait_seqno(ring, seqno, true, timeout);
1726 WARN_ON(!timespec_valid(timeout));
1727 args->timeout_ns = timespec_to_ns(timeout);
1732 drm_gem_object_unreference(&obj->base);
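/*
 * Illustrative userspace use of the wait ioctl above (a hedged sketch, not
 * part of this driver; "fd" and "handle" are assumed to come from the
 * caller's GEM setup):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 0,	   (0 behaves like the busy ioctl)
 *	};
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */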
1738 * i915_gem_object_sync - sync an object to a ring.
1740 * @obj: object which may be in use on another ring.
1741 * @to: ring we wish to use the object on. May be NULL.
1743 * This code is meant to abstract object synchronization with the GPU.
1744 * Calling with NULL implies synchronizing the object with the CPU
1745 * rather than a particular GPU ring.
1747 * Returns 0 if successful, else propagates up the lower layer error.
1750 i915_gem_object_sync(struct drm_i915_gem_object *obj,
1751 struct intel_ring_buffer *to)
1753 struct intel_ring_buffer *from = obj->ring;
1757 if (from == NULL || to == from)
1760 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
1761 return i915_gem_object_wait_rendering(obj, false);
1763 idx = intel_ring_sync_index(from, to);
1765 seqno = obj->last_read_seqno;
1766 if (seqno <= from->sync_seqno[idx])
1769 ret = i915_gem_check_olr(obj->ring, seqno);
1773 ret = to->sync_to(to, from, seqno);
1775 /* We use last_read_seqno because sync_to()
1776 * might have just caused seqno wrap under
1779 from->sync_seqno[idx] = obj->last_read_seqno;
1784 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
1786 u32 old_write_domain, old_read_domains;
1788 /* Act as a barrier for all accesses through the GTT */
1791 /* Force a pagefault for domain tracking on next user access */
1792 i915_gem_release_mmap(obj);
1794 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
1797 old_read_domains = obj->base.read_domains;
1798 old_write_domain = obj->base.write_domain;
1800 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
1801 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
1806 * Unbinds an object from the GTT aperture.
1809 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
1811 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1814 if (obj->gtt_space == NULL)
1820 ret = i915_gem_object_finish_gpu(obj);
1823 /* Continue on if we fail due to EIO, the GPU is hung so we
1824 * should be safe and we need to clean up or else we might
1825 * cause memory corruption through use-after-free.
1828 i915_gem_object_finish_gtt(obj);
1830 /* Move the object to the CPU domain to ensure that
1831 * any possible CPU writes while it's not in the GTT
1832 * are flushed when we go to remap it.
1835 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1836 if (ret == -ERESTART || ret == -EINTR)
1839 /* In the event of a disaster, abandon all caches and
1840 * hope for the best.
1842 i915_gem_clflush_object(obj);
1843 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1846 /* release the fence reg _after_ flushing */
1847 ret = i915_gem_object_put_fence(obj);
1851 if (obj->has_global_gtt_mapping)
1852 i915_gem_gtt_unbind_object(obj);
1853 if (obj->has_aliasing_ppgtt_mapping) {
1854 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
1855 obj->has_aliasing_ppgtt_mapping = 0;
1857 i915_gem_gtt_finish_object(obj);
1859 i915_gem_object_put_pages_gtt(obj);
1861 list_del_init(&obj->gtt_list);
1862 list_del_init(&obj->mm_list);
1863 /* Avoid an unnecessary call to unbind on rebind. */
1864 obj->map_and_fenceable = true;
1866 drm_mm_put_block(obj->gtt_space);
1867 obj->gtt_space = NULL;
1868 obj->gtt_offset = 0;
1870 if (i915_gem_object_is_purgeable(obj))
1871 i915_gem_object_truncate(obj);
1876 int i915_gpu_idle(struct drm_device *dev)
1878 drm_i915_private_t *dev_priv = dev->dev_private;
1879 struct intel_ring_buffer *ring;
1882 /* Flush everything onto the inactive list. */
1883 for_each_ring(ring, dev_priv, i) {
1884 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
1888 ret = intel_ring_idle(ring);
1896 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
1897 struct drm_i915_gem_object *obj)
1899 drm_i915_private_t *dev_priv = dev->dev_private;
1903 u32 size = obj->gtt_space->size;
1905 val = (uint64_t)((obj->gtt_offset + size - 4096) &
1907 val |= obj->gtt_offset & 0xfffff000;
1908 val |= (uint64_t)((obj->stride / 128) - 1) <<
1909 SANDYBRIDGE_FENCE_PITCH_SHIFT;
1911 if (obj->tiling_mode == I915_TILING_Y)
1912 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1913 val |= I965_FENCE_REG_VALID;
1917 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
1918 POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
1921 static void i965_write_fence_reg(struct drm_device *dev, int reg,
1922 struct drm_i915_gem_object *obj)
1924 drm_i915_private_t *dev_priv = dev->dev_private;
1928 u32 size = obj->gtt_space->size;
1930 val = (uint64_t)((obj->gtt_offset + size - 4096) &
1932 val |= obj->gtt_offset & 0xfffff000;
1933 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
1934 if (obj->tiling_mode == I915_TILING_Y)
1935 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1936 val |= I965_FENCE_REG_VALID;
1940 I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
1941 POSTING_READ(FENCE_REG_965_0 + reg * 8);
1944 static void i915_write_fence_reg(struct drm_device *dev, int reg,
1945 struct drm_i915_gem_object *obj)
1947 drm_i915_private_t *dev_priv = dev->dev_private;
1951 u32 size = obj->gtt_space->size;
1955 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
1956 (size & -size) != size ||
1957 (obj->gtt_offset & (size - 1)),
1958 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
1959 obj->gtt_offset, obj->map_and_fenceable, size);
1961 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
1966 /* Note: pitch better be a power of two tile widths */
1967 pitch_val = obj->stride / tile_width;
1968 pitch_val = ffs(pitch_val) - 1;
1970 val = obj->gtt_offset;
1971 if (obj->tiling_mode == I915_TILING_Y)
1972 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1973 val |= I915_FENCE_SIZE_BITS(size);
1974 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1975 val |= I830_FENCE_REG_VALID;
1980 reg = FENCE_REG_830_0 + reg * 4;
1982 reg = FENCE_REG_945_8 + (reg - 8) * 4;
1984 I915_WRITE(reg, val);
1988 static void i830_write_fence_reg(struct drm_device *dev, int reg,
1989 struct drm_i915_gem_object *obj)
1991 drm_i915_private_t *dev_priv = dev->dev_private;
1995 u32 size = obj->gtt_space->size;
1998 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
1999 (size & -size) != size ||
2000 (obj->gtt_offset & (size - 1)),
2001 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2002 obj->gtt_offset, size);
2004 pitch_val = obj->stride / 128;
2005 pitch_val = ffs(pitch_val) - 1;
2007 val = obj->gtt_offset;
2008 if (obj->tiling_mode == I915_TILING_Y)
2009 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2010 val |= I830_FENCE_SIZE_BITS(size);
2011 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2012 val |= I830_FENCE_REG_VALID;
2016 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2017 POSTING_READ(FENCE_REG_830_0 + reg * 4);
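/*
 * Write a fence register, dispatching on hardware generation to the
 * appropriate routine above (gen6+, i965, i915 or i830 register layouts).
 */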
2020 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2021 struct drm_i915_gem_object *obj)
2023 switch (INTEL_INFO(dev)->gen) {
2025 case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
2027 case 4: i965_write_fence_reg(dev, reg, obj); break;
2028 case 3: i915_write_fence_reg(dev, reg, obj); break;
2029 case 2: i830_write_fence_reg(dev, reg, obj); break;
2034 static inline int fence_number(struct drm_i915_private *dev_priv,
2035 struct drm_i915_fence_reg *fence)
2037 return fence - dev_priv->fence_regs;
2040 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2041 struct drm_i915_fence_reg *fence,
2044 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2045 int reg = fence_number(dev_priv, fence);
2047 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
2050 obj->fence_reg = reg;
2052 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2054 obj->fence_reg = I915_FENCE_REG_NONE;
2056 list_del_init(&fence->lru_list);
2061 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2063 if (obj->last_fenced_seqno) {
2064 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2068 obj->last_fenced_seqno = 0;
2071 /* Ensure that all CPU reads are completed before installing a fence
2072 * and all writes before removing the fence.
2074 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2077 obj->fenced_gpu_access = false;
2082 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2084 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2087 ret = i915_gem_object_flush_fence(obj);
2091 if (obj->fence_reg == I915_FENCE_REG_NONE)
2094 i915_gem_object_update_fence(obj,
2095 &dev_priv->fence_regs[obj->fence_reg],
2097 i915_gem_object_fence_lost(obj);
2102 static struct drm_i915_fence_reg *
2103 i915_find_fence_reg(struct drm_device *dev)
2105 struct drm_i915_private *dev_priv = dev->dev_private;
2106 struct drm_i915_fence_reg *reg, *avail;
2109 /* First try to find a free reg */
2111 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2112 reg = &dev_priv->fence_regs[i];
2116 if (!reg->pin_count)
2123 /* None available, try to steal one or wait for a user to finish */
2124 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2135 * i915_gem_object_get_fence - set up fencing for an object
2136 * @obj: object to map through a fence reg
2138 * When mapping objects through the GTT, userspace wants to be able to write
2139 * to them without having to worry about swizzling if the object is tiled.
2140 * This function walks the fence regs looking for a free one for @obj,
2141 * stealing one if it can't find any.
2143 * It then sets up the reg based on the object's properties: address, pitch
2144 * and tiling format.
2146 * For an untiled surface, this removes any existing fence.
2149 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2151 struct drm_device *dev = obj->base.dev;
2152 struct drm_i915_private *dev_priv = dev->dev_private;
2153 bool enable = obj->tiling_mode != I915_TILING_NONE;
2154 struct drm_i915_fence_reg *reg;
2157 /* Have we updated the tiling parameters upon the object and so
2158 * will need to serialise the write to the associated fence register?
2160 if (obj->fence_dirty) {
2161 ret = i915_gem_object_flush_fence(obj);
2166 /* Just update our place in the LRU if our fence is getting reused. */
2167 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2168 reg = &dev_priv->fence_regs[obj->fence_reg];
2169 if (!obj->fence_dirty) {
2170 list_move_tail(&reg->lru_list,
2171 &dev_priv->mm.fence_list);
2174 } else if (enable) {
2175 reg = i915_find_fence_reg(dev);
2180 struct drm_i915_gem_object *old = reg->obj;
2182 ret = i915_gem_object_flush_fence(old);
2186 i915_gem_object_fence_lost(old);
2191 i915_gem_object_update_fence(obj, reg, enable);
2192 obj->fence_dirty = false;
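/*
 * Validate a proposed GTT placement against its neighbours.  On machines
 * without an LLC, differently-snooped objects must not sit directly next to
 * each other, so both the preceding and following drm_mm nodes are checked
 * for an intervening hole or a matching cache level.
 */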
2197 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2198 struct drm_mm_node *gtt_space,
2199 unsigned long cache_level)
2201 struct drm_mm_node *other;
2203 /* On non-LLC machines we have to be careful when putting differing
2204 * types of snoopable memory together to avoid the prefetcher
2205 * crossing memory domains and dying.
2210 if (gtt_space == NULL)
2213 if (list_empty(&gtt_space->node_list))
2216 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2217 if (other->allocated && !other->hole_follows && other->color != cache_level)
2220 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2221 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2227 static void i915_gem_verify_gtt(struct drm_device *dev)
2230 struct drm_i915_private *dev_priv = dev->dev_private;
2231 struct drm_i915_gem_object *obj;
2234 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2235 if (obj->gtt_space == NULL) {
2236 printk(KERN_ERR "object found on GTT list with no space reserved\n");
2241 if (obj->cache_level != obj->gtt_space->color) {
2242 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2243 obj->gtt_space->start,
2244 obj->gtt_space->start + obj->gtt_space->size,
2246 obj->gtt_space->color);
2251 if (!i915_gem_valid_gtt_space(dev,
2253 obj->cache_level)) {
2254 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2255 obj->gtt_space->start,
2256 obj->gtt_space->start + obj->gtt_space->size,
2268 * Finds free space in the GTT aperture and binds the object there.
2271 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2273 bool map_and_fenceable,
2276 struct drm_device *dev = obj->base.dev;
2277 drm_i915_private_t *dev_priv = dev->dev_private;
2278 struct drm_mm_node *free_space;
2279 uint32_t size, fence_size, fence_alignment, unfenced_alignment;
2280 bool mappable, fenceable;
2283 if (obj->madv != I915_MADV_WILLNEED) {
2284 DRM_ERROR("Attempting to bind a purgeable object\n");
2288 fence_size = i915_gem_get_gtt_size(dev, obj->base.size,
2290 fence_alignment = i915_gem_get_gtt_alignment(dev, obj->base.size,
2292 unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(dev,
2293 obj->base.size, obj->tiling_mode);
2295 alignment = map_and_fenceable ? fence_alignment :
2297 if (map_and_fenceable && (alignment & (fence_alignment - 1)) != 0) {
2298 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2302 size = map_and_fenceable ? fence_size : obj->base.size;
2304 /* If the object is bigger than the entire aperture, reject it early
2305 * before evicting everything in a vain attempt to find space.
2307 if (obj->base.size > (map_and_fenceable ?
2308 dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2310 "Attempting to bind an object larger than the aperture\n");
2315 if (map_and_fenceable)
2317 drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
2318 size, alignment, obj->cache_level,
2319 0, dev_priv->mm.gtt_mappable_end,
2322 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
2323 size, alignment, obj->cache_level,
2326 if (free_space != NULL) {
2327 if (map_and_fenceable)
2329 drm_mm_get_block_range_generic(free_space,
2330 size, alignment, obj->cache_level,
2331 0, dev_priv->mm.gtt_mappable_end,
2335 drm_mm_get_block_generic(free_space,
2336 size, alignment, obj->cache_level,
2339 if (obj->gtt_space == NULL) {
2340 ret = i915_gem_evict_something(dev, size, alignment,
2351 * NOTE: i915_gem_object_get_pages_gtt() cannot
2352 * return ENOMEM, since we used VM_ALLOC_RETRY.
2354 ret = i915_gem_object_get_pages_gtt(obj, 0);
2356 drm_mm_put_block(obj->gtt_space);
2357 obj->gtt_space = NULL;
2361 i915_gem_gtt_bind_object(obj, obj->cache_level);
2363 i915_gem_object_put_pages_gtt(obj);
2364 drm_mm_put_block(obj->gtt_space);
2365 obj->gtt_space = NULL;
2366 if (i915_gem_evict_everything(dev))
2371 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2372 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2374 obj->gtt_offset = obj->gtt_space->start;
2377 obj->gtt_space->size == fence_size &&
2378 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2381 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2382 obj->map_and_fenceable = mappable && fenceable;
2384 i915_gem_verify_gtt(dev);
2389 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2392 /* If we don't have a page list set up, then we're not pinned
2393 * to GPU, and we can ignore the cache flush because it'll happen
2394 * again at bind time.
2396 if (obj->pages == NULL)
2399 /* If the GPU is snooping the contents of the CPU cache,
2400 * we do not need to manually clear the CPU cache lines. However,
2401 * the caches are only snooped when the render cache is
2402 * flushed/invalidated. As we always have to emit invalidations
2403 * and flushes when moving into and out of the RENDER domain, correct
2404 * snooping behaviour occurs naturally as the result of our domain
2407 if (obj->cache_level != I915_CACHE_NONE)
2410 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2413 /** Flushes the GTT write domain for the object if it's dirty. */
2415 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2417 uint32_t old_write_domain;
2419 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2422 /* No actual flushing is required for the GTT write domain. Writes
2423 * to it immediately go to main memory as far as we know, so there's
2424 * no chipset flush. It also doesn't land in render cache.
2426 * However, we do have to enforce the order so that all writes through
2427 * the GTT land before any writes to the device, such as updates to
2432 old_write_domain = obj->base.write_domain;
2433 obj->base.write_domain = 0;
2436 /** Flushes the CPU write domain for the object if it's dirty. */
2438 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2440 uint32_t old_write_domain;
2442 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2445 i915_gem_clflush_object(obj);
2446 intel_gtt_chipset_flush();
2447 old_write_domain = obj->base.write_domain;
2448 obj->base.write_domain = 0;
2452 * Moves a single object to the GTT read, and possibly write domain.
2454 * This function returns when the move is complete, including waiting on
2458 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2460 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2461 uint32_t old_write_domain, old_read_domains;
2464 /* Not valid to be called on unbound objects. */
2465 if (obj->gtt_space == NULL)
2468 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2471 ret = i915_gem_object_wait_rendering(obj, !write);
2475 i915_gem_object_flush_cpu_write_domain(obj);
2477 old_write_domain = obj->base.write_domain;
2478 old_read_domains = obj->base.read_domains;
2480 /* It should now be out of any other write domains, and we can update
2481 * the domain values for our changes.
2483 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2484 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2486 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2487 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2491 /* And bump the LRU for this access */
2492 if (i915_gem_object_is_inactive(obj))
2493 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2498 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2499 enum i915_cache_level cache_level)
2501 struct drm_device *dev = obj->base.dev;
2502 drm_i915_private_t *dev_priv = dev->dev_private;
2505 if (obj->cache_level == cache_level)
2508 if (obj->pin_count) {
2509 DRM_DEBUG("can not change the cache level of pinned objects\n");
2513 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
2514 ret = i915_gem_object_unbind(obj);
2519 if (obj->gtt_space) {
2520 ret = i915_gem_object_finish_gpu(obj);
2524 i915_gem_object_finish_gtt(obj);
2526 /* Before SandyBridge, you could not use tiling or fence
2527 * registers with snooped memory, so relinquish any fences
2528 * currently pointing to our region in the aperture.
2530 if (INTEL_INFO(dev)->gen < 6) {
2531 ret = i915_gem_object_put_fence(obj);
2536 if (obj->has_global_gtt_mapping)
2537 i915_gem_gtt_bind_object(obj, cache_level);
2538 if (obj->has_aliasing_ppgtt_mapping)
2539 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2542 obj->gtt_space->color = cache_level;
2545 if (cache_level == I915_CACHE_NONE) {
2546 u32 old_read_domains, old_write_domain;
2548 /* If we're coming from LLC cached, then we haven't
2549 * actually been tracking whether the data is in the
2550 * CPU cache or not, since we only allow one bit set
2551 * in obj->write_domain and have been skipping the clflushes.
2552 * Just set it to the CPU cache for now.
2554 KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
2555 ("obj %p in CPU write domain", obj));
2556 KASSERT((obj->base.read_domains & ~I915_GEM_DOMAIN_CPU) == 0,
2557 ("obj %p in CPU read domain", obj));
2559 old_read_domains = obj->base.read_domains;
2560 old_write_domain = obj->base.write_domain;
2562 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2563 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2567 obj->cache_level = cache_level;
2568 i915_gem_verify_gtt(dev);
2572 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
2573 struct drm_file *file)
2575 struct drm_i915_gem_caching *args = data;
2576 struct drm_i915_gem_object *obj;
2579 ret = i915_mutex_lock_interruptible(dev);
2583 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2584 if (&obj->base == NULL) {
2589 args->caching = obj->cache_level != I915_CACHE_NONE;
2591 drm_gem_object_unreference(&obj->base);
2597 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
2598 struct drm_file *file)
2600 struct drm_i915_gem_caching *args = data;
2601 struct drm_i915_gem_object *obj;
2602 enum i915_cache_level level;
2605 switch (args->caching) {
2606 case I915_CACHING_NONE:
2607 level = I915_CACHE_NONE;
2609 case I915_CACHING_CACHED:
2610 level = I915_CACHE_LLC;
2616 ret = i915_mutex_lock_interruptible(dev);
2620 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2621 if (&obj->base == NULL) {
2626 ret = i915_gem_object_set_cache_level(obj, level);
2628 drm_gem_object_unreference(&obj->base);
2635 * Prepare buffer for display plane (scanout, cursors, etc).
2636 * Can be called from an uninterruptible phase (modesetting) and allows
2637 * any flushes to be pipelined (for pageflips).
2640 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2642 struct intel_ring_buffer *pipelined)
2644 u32 old_read_domains, old_write_domain;
2647 if (pipelined != obj->ring) {
2648 ret = i915_gem_object_sync(obj, pipelined);
2653 /* The display engine is not coherent with the LLC cache on gen6. As
2654 * a result, we make sure that the pinning that is about to occur is
2655 * done with uncached PTEs. This is the lowest common denominator for all
2658 * However for gen6+, we could do better by using the GFDT bit instead
2659 * of uncaching, which would allow us to flush all the LLC-cached data
2660 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
2662 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
2666 /* As the user may map the buffer once pinned in the display plane
2667 * (e.g. libkms for the bootup splash), we have to ensure that we
2668 * always use map_and_fenceable for all scanout buffers.
2670 ret = i915_gem_object_pin(obj, alignment, true, false);
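/*
 * Pinning with map_and_fenceable == true forces the scanout buffer into
 * the mappable, fenceable part of the aperture up front, so a later GTT
 * mmap of the frontbuffer (e.g. a boot splash) never has to rebind a
 * buffer the display engine is actively scanning out of.
 */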
2674 i915_gem_object_flush_cpu_write_domain(obj);
2676 old_write_domain = obj->base.write_domain;
2677 old_read_domains = obj->base.read_domains;
2679 /* It should now be out of any other write domains, and we can update
2680 * the domain values for our changes.
2682 obj->base.write_domain = 0;
2683 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2689 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
2693 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
2696 ret = i915_gem_object_wait_rendering(obj, false);
2700 /* Ensure that we invalidate the GPU's caches and TLBs. */
2701 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2706 * Moves a single object to the CPU read, and possibly write domain.
2708 * This function returns when the move is complete, including waiting on
2712 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
2714 uint32_t old_write_domain, old_read_domains;
2717 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
2720 ret = i915_gem_object_wait_rendering(obj, !write);
2724 i915_gem_object_flush_gtt_write_domain(obj);
2726 old_write_domain = obj->base.write_domain;
2727 old_read_domains = obj->base.read_domains;
2729 /* Flush the CPU cache if it's still invalid. */
2730 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2731 i915_gem_clflush_object(obj);
2733 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2736 /* It should now be out of any other write domains, and we can update
2737 * the domain values for our changes.
2739 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2741 /* If we're writing through the CPU, then the GPU read domains will
2742 * need to be invalidated at next use.
2745 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2746 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2752 /* Throttle our rendering by waiting until the ring has completed our requests
2753 * emitted over 20 msec ago.
2755 * Note that if we were to use the current jiffies each time around the loop,
2756 * we wouldn't escape the function with any frames outstanding if the time to
2757 * render a frame was over 20ms.
2759 * This should get us reasonable parallelism between CPU and GPU but also
2760 * relatively low latency when blocking on a particular request to finish.
2763 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
2765 struct drm_i915_private *dev_priv = dev->dev_private;
2766 struct drm_i915_file_private *file_priv = file->driver_priv;
2767 unsigned long recent_enough = ticks - (20 * hz / 1000);
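/*
 * ticks advances hz times per second, so recent_enough is the tick count
 * from roughly 20 ms ago (e.g. with hz == 100 it is ticks - 2); it stands
 * in for the "jiffies - msecs_to_jiffies(20)" computation the comment
 * above refers to.
 */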
2768 struct drm_i915_gem_request *request;
2769 struct intel_ring_buffer *ring = NULL;
2773 if (atomic_read(&dev_priv->mm.wedged))
2776 spin_lock(&file_priv->mm.lock);
2777 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
2778 if (time_after_eq(request->emitted_jiffies, recent_enough))
2781 ring = request->ring;
2782 seqno = request->seqno;
2784 spin_unlock(&file_priv->mm.lock);
2789 ret = __wait_seqno(ring, seqno, true, NULL);
2792 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
2798 i915_gem_object_pin(struct drm_i915_gem_object *obj,
2800 bool map_and_fenceable,
2805 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
2808 if (obj->gtt_space != NULL) {
2809 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
2810 (map_and_fenceable && !obj->map_and_fenceable)) {
2811 WARN(obj->pin_count,
2812 "bo is already pinned with incorrect alignment:"
2813 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
2814 " obj->map_and_fenceable=%d\n",
2815 obj->gtt_offset, alignment,
2817 obj->map_and_fenceable);
2818 ret = i915_gem_object_unbind(obj);
2824 if (obj->gtt_space == NULL) {
2825 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2827 ret = i915_gem_object_bind_to_gtt(obj, alignment,
2833 if (!dev_priv->mm.aliasing_ppgtt)
2834 i915_gem_gtt_bind_object(obj, obj->cache_level);
2837 if (!obj->has_global_gtt_mapping && map_and_fenceable)
2838 i915_gem_gtt_bind_object(obj, obj->cache_level);
2841 obj->pin_mappable |= map_and_fenceable;
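/*
 * At this point the object is guaranteed a GTT placement; pin_mappable
 * only records whether some pinner asked for a mappable placement, and is
 * cleared again in i915_gem_object_unpin() once the pin count drops to 0.
 */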
2847 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
2849 BUG_ON(obj->pin_count == 0);
2850 BUG_ON(obj->gtt_space == NULL);
2852 if (--obj->pin_count == 0)
2853 obj->pin_mappable = false;
2857 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2858 struct drm_file *file)
2860 struct drm_i915_gem_pin *args = data;
2861 struct drm_i915_gem_object *obj;
2864 ret = i915_mutex_lock_interruptible(dev);
2868 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2869 if (&obj->base == NULL) {
2874 if (obj->madv != I915_MADV_WILLNEED) {
2875 DRM_ERROR("Attempting to pin a purgeable buffer\n");
2880 if (obj->pin_filp != NULL && obj->pin_filp != file) {
2881 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
2887 if (obj->user_pin_count == 0) {
2888 ret = i915_gem_object_pin(obj, args->alignment, true, false);
2893 obj->user_pin_count++;
2894 obj->pin_filp = file;
2896 /* XXX - flush the CPU caches for pinned objects
2897 * as the X server doesn't manage domains yet
2899 i915_gem_object_flush_cpu_write_domain(obj);
2900 args->offset = obj->gtt_offset;
2902 drm_gem_object_unreference(&obj->base);
2909 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2910 struct drm_file *file)
2912 struct drm_i915_gem_pin *args = data;
2913 struct drm_i915_gem_object *obj;
2916 ret = i915_mutex_lock_interruptible(dev);
2920 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2921 if (&obj->base == NULL) {
2926 if (obj->pin_filp != file) {
2927 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
2932 obj->user_pin_count--;
2933 if (obj->user_pin_count == 0) {
2934 obj->pin_filp = NULL;
2935 i915_gem_object_unpin(obj);
2939 drm_gem_object_unreference(&obj->base);
2946 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2947 struct drm_file *file)
2949 struct drm_i915_gem_busy *args = data;
2950 struct drm_i915_gem_object *obj;
2953 ret = i915_mutex_lock_interruptible(dev);
2957 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2958 if (&obj->base == NULL) {
2963 /* Count all active objects as busy, even if they are currently not used
2964 * by the gpu. Users of this interface expect objects to eventually
2965 * become non-busy without any further actions, therefore emit any
2966 * necessary flushes here.
2968 ret = i915_gem_object_flush_active(obj);
2970 args->busy = obj->active;
/* report the ring an active buffer is busy on in the upper 16 bits */
2972 args->busy |= intel_ring_flag(obj->ring) << 16;
2975 drm_gem_object_unreference(&obj->base);
2982 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2983 struct drm_file *file_priv)
2985 return i915_gem_ring_throttle(dev, file_priv);
2989 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
2990 struct drm_file *file_priv)
2992 struct drm_i915_gem_madvise *args = data;
2993 struct drm_i915_gem_object *obj;
2996 switch (args->madv) {
2997 case I915_MADV_DONTNEED:
2998 case I915_MADV_WILLNEED:
3004 ret = i915_mutex_lock_interruptible(dev);
3008 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3009 if (&obj->base == NULL) {
3014 if (obj->pin_count) {
3019 if (obj->madv != __I915_MADV_PURGED)
3020 obj->madv = args->madv;
3022 /* if the object is no longer attached, discard its backing storage */
3023 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3024 i915_gem_object_truncate(obj);
3026 args->retained = obj->madv != __I915_MADV_PURGED;
3029 drm_gem_object_unreference(&obj->base);
3035 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3038 struct drm_i915_private *dev_priv;
3039 struct drm_i915_gem_object *obj;
3041 dev_priv = dev->dev_private;
3043 obj = kmalloc(sizeof(*obj), M_DRM, M_WAITOK | M_ZERO);
3045 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3046 drm_free(obj, M_DRM);
3050 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3051 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3054 /* On some devices, we can have the GPU use the LLC (the CPU
3055 * cache) for about a 10% performance improvement
3056 * compared to uncached. Graphics requests other than
3057 * display scanout are coherent with the CPU in
3058 * accessing this cache. This means in this mode we
3059 * don't need to clflush on the CPU side, and on the
3060 * GPU side we only need to flush internal caches to
3061 * get data visible to the CPU.
3063 * However, we maintain the display planes as UC, and so
3064 * need to rebind when first used as such.
3066 obj->cache_level = I915_CACHE_LLC;
3068 obj->cache_level = I915_CACHE_NONE;
3069 obj->base.driver_private = NULL;
3070 obj->fence_reg = I915_FENCE_REG_NONE;
3071 INIT_LIST_HEAD(&obj->mm_list);
3072 INIT_LIST_HEAD(&obj->gtt_list);
3073 INIT_LIST_HEAD(&obj->ring_list);
3074 INIT_LIST_HEAD(&obj->exec_list);
3075 obj->madv = I915_MADV_WILLNEED;
3076 /* Avoid an unnecessary call to unbind on the first bind. */
3077 obj->map_and_fenceable = true;
3079 i915_gem_info_add_obj(dev_priv, size);
3084 int i915_gem_init_object(struct drm_gem_object *obj)
3091 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3093 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3094 struct drm_device *dev = obj->base.dev;
3095 drm_i915_private_t *dev_priv = dev->dev_private;
3098 i915_gem_detach_phys_object(dev, obj);
3101 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3102 bool was_interruptible;
3104 was_interruptible = dev_priv->mm.interruptible;
3105 dev_priv->mm.interruptible = false;
3107 WARN_ON(i915_gem_object_unbind(obj));
3109 dev_priv->mm.interruptible = was_interruptible;
3112 drm_gem_free_mmap_offset(&obj->base);
3114 drm_gem_object_release(&obj->base);
3115 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3117 drm_free(obj->bit_17, M_DRM);
3118 drm_free(obj, M_DRM);
3122 i915_gem_do_init(struct drm_device *dev, unsigned long start,
3123 unsigned long mappable_end, unsigned long end)
3125 drm_i915_private_t *dev_priv;
3126 unsigned long mappable;
3129 dev_priv = dev->dev_private;
3130 mappable = min(end, mappable_end) - start;
3132 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
3134 dev_priv->mm.gtt_start = start;
3135 dev_priv->mm.gtt_mappable_end = mappable_end;
3136 dev_priv->mm.gtt_end = end;
3137 dev_priv->mm.gtt_total = end - start;
3138 dev_priv->mm.mappable_gtt_total = mappable;
3140 /* Take over this portion of the GTT */
3141 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
3142 device_printf(dev->dev,
3143 "taking over the fictitious range 0x%lx-0x%lx\n",
3144 dev->agp->base + start, dev->agp->base + start + mappable);
3145 error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
3146 dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
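/*
 * Registering the CPU-visible aperture as a fictitious physical range
 * (with write-combining attributes) gives every aperture page a vm_page,
 * which is what later lets i915_gem_pager_fault() translate a GTT offset
 * into a page via vm_phys_fictitious_to_vm_page() and insert it into a
 * user mapping.
 */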
3151 i915_gem_idle(struct drm_device *dev)
3153 drm_i915_private_t *dev_priv = dev->dev_private;
3158 if (dev_priv->mm.suspended) {
3163 ret = i915_gpu_idle(dev);
3168 i915_gem_retire_requests(dev);
3170 /* Under UMS, be paranoid and evict. */
3171 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3172 i915_gem_evict_everything(dev);
3174 i915_gem_reset_fences(dev);
3176 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3177 * We need to replace this with a semaphore, or something.
3178 * And not confound mm.suspended!
3180 dev_priv->mm.suspended = 1;
3181 del_timer_sync(&dev_priv->hangcheck_timer);
3183 i915_kernel_lost_context(dev);
3184 i915_gem_cleanup_ringbuffer(dev);
3188 /* Cancel the retire work handler, which should be idle now. */
3189 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3194 void i915_gem_l3_remap(struct drm_device *dev)
3196 drm_i915_private_t *dev_priv = dev->dev_private;
3200 if (!HAS_L3_GPU_CACHE(dev))
3203 if (!dev_priv->l3_parity.remap_info)
3206 misccpctl = I915_READ(GEN7_MISCCPCTL);
3207 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3208 POSTING_READ(GEN7_MISCCPCTL);
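/*
 * With DOP clock gating temporarily disabled, the loop below replays the
 * driver's saved remap information into the GEN7_L3LOG registers; since
 * i915_gem_l3_remap() is called from i915_gem_init_hw(), the L3 mapping is
 * reapplied whenever the hardware is (re)initialized.
 */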
3210 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3211 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3212 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
3213 DRM_DEBUG("0x%x was already programmed to %x\n",
3214 GEN7_L3LOG_BASE + i, remap);
3215 if (remap && !dev_priv->l3_parity.remap_info[i/4])
3216 DRM_DEBUG_DRIVER("Clearing remapped register\n");
3217 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
3220 /* Make sure all the writes land before disabling dop clock gating */
3221 POSTING_READ(GEN7_L3LOG_BASE);
3223 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3226 void i915_gem_init_swizzling(struct drm_device *dev)
3228 drm_i915_private_t *dev_priv = dev->dev_private;
3230 if (INTEL_INFO(dev)->gen < 5 ||
3231 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3234 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3235 DISP_TILE_SURFACE_SWIZZLING);
3240 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3242 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3244 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3248 intel_enable_blt(struct drm_device *dev)
3255 /* The blitter was dysfunctional on early prototypes */
3256 revision = pci_read_config(dev->dev, PCIR_REVID, 1);
3257 if (IS_GEN6(dev) && revision < 8) {
3258 DRM_INFO("BLT not supported on this pre-production hardware;"
3259 " graphics performance will be degraded.\n");
3267 i915_gem_init_hw(struct drm_device *dev)
3269 drm_i915_private_t *dev_priv = dev->dev_private;
3272 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
3273 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
3275 i915_gem_l3_remap(dev);
3277 i915_gem_init_swizzling(dev);
3279 ret = intel_init_render_ring_buffer(dev);
3284 ret = intel_init_bsd_ring_buffer(dev);
3286 goto cleanup_render_ring;
3289 if (intel_enable_blt(dev)) {
3290 ret = intel_init_blt_ring_buffer(dev);
3292 goto cleanup_bsd_ring;
3295 dev_priv->next_seqno = 1;
3298 * XXX: There was some w/a described somewhere suggesting loading
3299 * contexts before PPGTT.
3301 i915_gem_context_init(dev);
3302 i915_gem_init_ppgtt(dev);
3307 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3308 cleanup_render_ring:
3309 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3314 intel_enable_ppgtt(struct drm_device *dev)
3316 if (i915_enable_ppgtt >= 0)
3317 return i915_enable_ppgtt;
3319 /* Disable ppgtt on SNB if VT-d is on. */
3320 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
3326 int i915_gem_init(struct drm_device *dev)
3328 struct drm_i915_private *dev_priv = dev->dev_private;
3329 unsigned long prealloc_size, gtt_size, mappable_size;
3332 prealloc_size = dev_priv->mm.gtt->stolen_size;
3333 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3334 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3336 /* Basic memrange allocator for stolen space */
3337 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
3340 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
3341 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3342 * aperture accordingly when using aliasing ppgtt. */
3343 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
3344 /* For paranoia keep the guard page in between. */
3345 gtt_size -= PAGE_SIZE;
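/*
 * A rough sizing sketch, assuming I915_PPGTT_PD_ENTRIES is 512 and 4 KiB
 * pages: the page-directory entries occupy the top 512 global GTT entries,
 * i.e. 512 * 4 KiB = 2 MiB of aperture address space, plus the one guard
 * page subtracted above, all of which is withheld from the range handed to
 * i915_gem_do_init() below.
 */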
3347 i915_gem_do_init(dev, 0, mappable_size, gtt_size);
3349 ret = i915_gem_init_aliasing_ppgtt(dev);
3355 /* Let GEM Manage all of the aperture.
3357 * However, leave one page at the end still bound to the scratch
3358 * page. There are a number of places where the hardware
3359 * apparently prefetches past the end of the object, and we've
3360 * seen multiple hangs with the GPU head pointer stuck in a
3361 * batchbuffer bound at the last page of the aperture. One page
3362 * should be enough to keep any prefetching inside of the
3365 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
3368 ret = i915_gem_init_hw(dev);
3371 i915_gem_cleanup_aliasing_ppgtt(dev);
3376 /* Try to set up FBC with a reasonable compressed buffer size */
3377 if (I915_HAS_FBC(dev) && i915_powersave) {
3380 /* Leave 1M for line length buffer & misc. */
3382 /* Try to get a 32M buffer... */
3383 if (prealloc_size > (36*1024*1024))
3384 cfb_size = 32*1024*1024;
3385 else /* fall back to 7/8 of the stolen space */
3386 cfb_size = prealloc_size * 7 / 8;
3387 i915_setup_compression(dev, cfb_size);
3391 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
3392 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3393 dev_priv->dri1.allow_batchbuffer = 1;
3398 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3400 drm_i915_private_t *dev_priv = dev->dev_private;
3401 struct intel_ring_buffer *ring;
3404 for_each_ring(ring, dev_priv, i)
3405 intel_cleanup_ring_buffer(ring);
3409 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3410 struct drm_file *file_priv)
3412 drm_i915_private_t *dev_priv = dev->dev_private;
3415 if (drm_core_check_feature(dev, DRIVER_MODESET))
3418 if (atomic_read(&dev_priv->mm.wedged)) {
3419 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3420 atomic_set(&dev_priv->mm.wedged, 0);
3424 dev_priv->mm.suspended = 0;
3426 ret = i915_gem_init_hw(dev);
3432 KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
3435 ret = drm_irq_install(dev);
3437 goto cleanup_ringbuffer;
3443 i915_gem_cleanup_ringbuffer(dev);
3444 dev_priv->mm.suspended = 1;
3451 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3452 struct drm_file *file_priv)
3454 if (drm_core_check_feature(dev, DRIVER_MODESET))
3457 drm_irq_uninstall(dev);
3458 return i915_gem_idle(dev);
3462 i915_gem_lastclose(struct drm_device *dev)
3466 if (drm_core_check_feature(dev, DRIVER_MODESET))
3469 ret = i915_gem_idle(dev);
3471 DRM_ERROR("failed to idle hardware: %d\n", ret);
3475 init_ring_lists(struct intel_ring_buffer *ring)
3477 INIT_LIST_HEAD(&ring->active_list);
3478 INIT_LIST_HEAD(&ring->request_list);
3482 i915_gem_load(struct drm_device *dev)
3485 drm_i915_private_t *dev_priv = dev->dev_private;
3487 INIT_LIST_HEAD(&dev_priv->mm.active_list);
3488 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3489 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3490 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3491 for (i = 0; i < I915_NUM_RINGS; i++)
3492 init_ring_lists(&dev_priv->ring[i]);
3493 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3494 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3495 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3496 i915_gem_retire_work_handler);
3497 init_completion(&dev_priv->error_completion);
3499 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3501 I915_WRITE(MI_ARB_STATE,
3502 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
3505 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3507 /* Old X drivers will take 0-2 for front, back, depth buffers */
3508 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3509 dev_priv->fence_reg_start = 3;
3511 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3512 dev_priv->num_fence_regs = 16;
3514 dev_priv->num_fence_regs = 8;
3516 /* Initialize fence registers to zero */
3517 i915_gem_reset_fences(dev);
3519 i915_gem_detect_bit_6_swizzle(dev);
3520 init_waitqueue_head(&dev_priv->pending_flip_queue);
3522 dev_priv->mm.interruptible = true;
3525 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3526 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3527 register_shrinker(&dev_priv->mm.inactive_shrinker);
3529 dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
3530 i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
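/*
 * Memory pressure is handled on two paths: the inactive-list shrinker
 * registered above runs from the pageout code on demand, while the
 * vm_lowmem event handler (i915_gem_lowmem() below) unbinds purgeable and
 * then inactive buffers when the system is nearly out of pages.
 */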
3535 * Create a physically contiguous memory object for this object
3536 * e.g. for cursor + overlay regs
3538 static int i915_gem_init_phys_object(struct drm_device *dev,
3539 int id, int size, int align)
3541 drm_i915_private_t *dev_priv = dev->dev_private;
3542 struct drm_i915_gem_phys_object *phys_obj;
3545 if (dev_priv->mm.phys_objs[id - 1] || !size)
3548 phys_obj = kmalloc(sizeof(struct drm_i915_gem_phys_object), M_DRM,
3555 phys_obj->handle = drm_pci_alloc(dev, size, align, ~0);
3556 if (!phys_obj->handle) {
3560 pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
3561 size / PAGE_SIZE, PAT_WRITE_COMBINING);
3563 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3568 drm_free(phys_obj, M_DRM);
3572 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
3574 drm_i915_private_t *dev_priv = dev->dev_private;
3575 struct drm_i915_gem_phys_object *phys_obj;
3577 if (!dev_priv->mm.phys_objs[id - 1])
3580 phys_obj = dev_priv->mm.phys_objs[id - 1];
3581 if (phys_obj->cur_obj) {
3582 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3585 drm_pci_free(dev, phys_obj->handle);
3586 drm_free(phys_obj, M_DRM);
3587 dev_priv->mm.phys_objs[id - 1] = NULL;
3590 void i915_gem_free_all_phys_object(struct drm_device *dev)
3594 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3595 i915_gem_free_phys_object(dev, i);
3598 void i915_gem_detach_phys_object(struct drm_device *dev,
3599 struct drm_i915_gem_object *obj)
3608 vaddr = obj->phys_obj->handle->vaddr;
3610 page_count = obj->base.size / PAGE_SIZE;
3611 VM_OBJECT_LOCK(obj->base.vm_obj);
3612 for (i = 0; i < page_count; i++) {
3613 m = i915_gem_wire_page(obj->base.vm_obj, i);
3617 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3618 sf = sf_buf_alloc(m);
3620 dst = (char *)sf_buf_kva(sf);
3621 memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE);
3624 drm_clflush_pages(&m, 1);
3626 VM_OBJECT_LOCK(obj->base.vm_obj);
3627 vm_page_reference(m);
3629 vm_page_busy_wait(m, FALSE, "i915gem");
3630 vm_page_unwire(m, 0);
3633 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3634 intel_gtt_chipset_flush();
3636 obj->phys_obj->cur_obj = NULL;
3637 obj->phys_obj = NULL;
3641 i915_gem_attach_phys_object(struct drm_device *dev,
3642 struct drm_i915_gem_object *obj,
3646 drm_i915_private_t *dev_priv = dev->dev_private;
3650 int i, page_count, ret;
3652 if (id > I915_MAX_PHYS_OBJECT)
3655 if (obj->phys_obj) {
3656 if (obj->phys_obj->id == id)
3658 i915_gem_detach_phys_object(dev, obj);
3661 /* create a new object */
3662 if (!dev_priv->mm.phys_objs[id - 1]) {
3663 ret = i915_gem_init_phys_object(dev, id,
3664 obj->base.size, align);
3666 DRM_ERROR("failed to init phys object %d size: %zu\n",
3667 id, obj->base.size);
3672 /* bind to the object */
3673 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3674 obj->phys_obj->cur_obj = obj;
3676 page_count = obj->base.size / PAGE_SIZE;
3678 VM_OBJECT_LOCK(obj->base.vm_obj);
3680 for (i = 0; i < page_count; i++) {
3681 m = i915_gem_wire_page(obj->base.vm_obj, i);
3686 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3687 sf = sf_buf_alloc(m);
3688 src = (char *)sf_buf_kva(sf);
3689 dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
3690 memcpy(dst, src, PAGE_SIZE);
3693 VM_OBJECT_LOCK(obj->base.vm_obj);
3695 vm_page_reference(m);
3696 vm_page_busy_wait(m, FALSE, "i915gem");
3697 vm_page_unwire(m, 0);
3700 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3706 i915_gem_phys_pwrite(struct drm_device *dev,
3707 struct drm_i915_gem_object *obj,
3708 struct drm_i915_gem_pwrite *args,
3709 struct drm_file *file_priv)
3711 void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
3712 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
3714 if (copyin_nofault(user_data, vaddr, args->size) != 0) {
3715 unsigned long unwritten;
3717 /* The physical object once assigned is fixed for the lifetime
3718 * of the obj, so we can safely drop the lock and continue
3722 unwritten = copy_from_user(vaddr, user_data, args->size);
3728 i915_gem_chipset_flush(dev);
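/*
 * The phys object backing store was mapped write-combining in
 * i915_gem_init_phys_object(), so the chipset flush above is there to make
 * sure the freshly written cursor/overlay data has reached memory before
 * the hardware fetches it.
 */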
3732 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
3734 struct drm_i915_file_private *file_priv = file->driver_priv;
3736 /* Clean up our request list when the client is going away, so that
3737 * later retire_requests won't dereference our soon-to-be-gone
3740 spin_lock(&file_priv->mm.lock);
3741 while (!list_empty(&file_priv->mm.request_list)) {
3742 struct drm_i915_gem_request *request;
3744 request = list_first_entry(&file_priv->mm.request_list,
3745 struct drm_i915_gem_request,
3747 list_del(&request->client_list);
3748 request->file_priv = NULL;
3750 spin_unlock(&file_priv->mm.lock);
3754 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
3755 vm_ooffset_t foff, struct ucred *cred, u_short *color)
3758 *color = 0; /* XXXKIB */
3765 i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
3768 struct drm_gem_object *gem_obj;
3769 struct drm_i915_gem_object *obj;
3770 struct drm_device *dev;
3771 drm_i915_private_t *dev_priv;
3776 gem_obj = vm_obj->handle;
3777 obj = to_intel_bo(gem_obj);
3778 dev = obj->base.dev;
3779 dev_priv = dev->dev_private;
3781 write = (prot & VM_PROT_WRITE) != 0;
3785 vm_object_pip_add(vm_obj, 1);
3788 * Remove the placeholder page inserted by vm_fault() from the
3789 * object before dropping the object lock. If
3790 * i915_gem_release_mmap() is active in parallel on this gem
3791 * object, then it owns the drm device sx and might find the
3792 * placeholder already. Then, since the page is busy,
3793 * i915_gem_release_mmap() sleeps waiting for the busy state
3794 of the page cleared. We will not be able to acquire the drm
3795 device lock until i915_gem_release_mmap() is able to make
3798 if (*mres != NULL) {
3800 vm_page_remove(oldm);
3805 VM_OBJECT_UNLOCK(vm_obj);
3811 ret = i915_mutex_lock_interruptible(dev);
3820 * Since the object lock was dropped, other thread might have
3821 * faulted on the same GTT address and instantiated the
3822 * mapping for the page. Recheck.
3824 VM_OBJECT_LOCK(vm_obj);
3825 m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
3827 if ((m->flags & PG_BUSY) != 0) {
3830 vm_page_sleep(m, "915pee");
3836 VM_OBJECT_UNLOCK(vm_obj);
3838 /* Now bind it into the GTT if needed */
3839 if (!obj->map_and_fenceable) {
3840 ret = i915_gem_object_unbind(obj);
3846 if (!obj->gtt_space) {
3847 ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
3853 ret = i915_gem_object_set_to_gtt_domain(obj, write);
3860 if (obj->tiling_mode == I915_TILING_NONE)
3861 ret = i915_gem_object_put_fence(obj);
3863 ret = i915_gem_object_get_fence(obj);
3869 if (i915_gem_object_is_inactive(obj))
3870 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3872 obj->fault_mappable = true;
3873 VM_OBJECT_LOCK(vm_obj);
3874 m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
3881 KASSERT((m->flags & PG_FICTITIOUS) != 0,
3882 ("not fictitious %p", m));
3883 KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
3885 if ((m->flags & PG_BUSY) != 0) {
3888 vm_page_sleep(m, "915pbs");
3892 m->valid = VM_PAGE_BITS_ALL;
3893 vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
3896 vm_page_busy_try(m, false);
3902 vm_object_pip_wakeup(vm_obj);
3903 return (VM_PAGER_OK);
3908 KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
3909 if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
3910 goto unlocked_vmobj;
3912 VM_OBJECT_LOCK(vm_obj);
3913 vm_object_pip_wakeup(vm_obj);
3914 return (VM_PAGER_ERROR);
3918 i915_gem_pager_dtor(void *handle)
3920 struct drm_gem_object *obj;
3921 struct drm_device *dev;
3927 drm_gem_free_mmap_offset(obj);
3928 i915_gem_release_mmap(to_intel_bo(obj));
3929 drm_gem_object_unreference(obj);
3933 struct cdev_pager_ops i915_gem_pager_ops = {
3934 .cdev_pg_fault = i915_gem_pager_fault,
3935 .cdev_pg_ctor = i915_gem_pager_ctor,
3936 .cdev_pg_dtor = i915_gem_pager_dtor
3939 #define GEM_PARANOID_CHECK_GTT 0
3940 #if GEM_PARANOID_CHECK_GTT
3942 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
3945 struct drm_i915_private *dev_priv;
3947 unsigned long start, end;
3951 dev_priv = dev->dev_private;
3952 start = OFF_TO_IDX(dev_priv->mm.gtt_start);
3953 end = OFF_TO_IDX(dev_priv->mm.gtt_end);
3954 for (i = start; i < end; i++) {
3955 pa = intel_gtt_read_pte_paddr(i);
3956 for (j = 0; j < page_count; j++) {
3957 if (pa == VM_PAGE_TO_PHYS(ma[j])) {
3958 panic("Page %p in GTT pte index %d pte %x",
3959 ma[j], i, intel_gtt_read_pte(i));
3966 #define VM_OBJECT_LOCK_ASSERT_OWNED(object)
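/*
 * i915_gem_wire_page: grab the backing page at pindex with the VM object
 * lock held (VM_ALLOC_RETRY makes the grab sleep for memory rather than
 * fail), page its contents in from the backing pager when it is not yet
 * fully valid, zero-fill it when the pager has no copy, and hand it back
 * wired for use by the GTT binding and phys-object copy paths.
 */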
3969 i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
3974 VM_OBJECT_LOCK_ASSERT_OWNED(object);
3975 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3976 if (m->valid != VM_PAGE_BITS_ALL) {
3977 if (vm_pager_has_page(object, pindex)) {
3978 rv = vm_pager_get_page(object, &m, 1);
3979 m = vm_page_lookup(object, pindex);
3982 if (rv != VM_PAGER_OK) {
3987 pmap_zero_page(VM_PAGE_TO_PHYS(m));
3988 m->valid = VM_PAGE_BITS_ALL;
3998 i915_gpu_is_active(struct drm_device *dev)
4000 drm_i915_private_t *dev_priv = dev->dev_private;
4002 return !list_empty(&dev_priv->mm.active_list);
4006 i915_gem_lowmem(void *arg)
4008 struct drm_device *dev;
4009 struct drm_i915_private *dev_priv;
4010 struct drm_i915_gem_object *obj, *next;
4011 int cnt, cnt_fail, cnt_total;
4014 dev_priv = dev->dev_private;
4016 if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT))
4020 /* first scan for clean buffers */
4021 i915_gem_retire_requests(dev);
4023 cnt_total = cnt_fail = cnt = 0;
4025 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4027 if (i915_gem_object_is_purgeable(obj)) {
4028 if (i915_gem_object_unbind(obj) != 0)
4034 /* second pass, evict/count anything still on the inactive list */
4035 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
4037 if (i915_gem_object_unbind(obj) == 0)
4043 if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) {
4045 * We are desperate for pages, so as a last resort, wait
4046 * for the GPU to finish and discard whatever we can.
4047 * This dramatically reduces the number of OOM-killer
4048 * events whilst running the GPU aggressively.
4050 if (i915_gpu_idle(dev) == 0)
4057 i915_gem_unload(struct drm_device *dev)
4059 struct drm_i915_private *dev_priv;
4061 dev_priv = dev->dev_private;
4062 EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);