2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
26 * Copyright (c) 2011 The FreeBSD Foundation
27 * All rights reserved.
29 * This software was developed by Konstantin Belousov under sponsorship from
30 * the FreeBSD Foundation.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
41 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 #include <sys/resourcevar.h>
56 #include <sys/sfbuf.h>
59 #include <drm/i915_drm.h>
61 #include "intel_drv.h"
62 #include "intel_ringbuffer.h"
63 #include <linux/completion.h>
64 #include <linux/jiffies.h>
65 #include <linux/time.h>
67 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
68 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
69 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
71 bool map_and_fenceable,
73 static int i915_gem_phys_pwrite(struct drm_device *dev,
74 struct drm_i915_gem_object *obj, uint64_t data_ptr, uint64_t offset,
75 uint64_t size, struct drm_file *file_priv);
77 static void i915_gem_write_fence(struct drm_device *dev, int reg,
78 struct drm_i915_gem_object *obj);
79 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
80 struct drm_i915_fence_reg *fence,
83 static uint32_t i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size,
85 static uint32_t i915_gem_get_gtt_alignment(struct drm_device *dev,
86 uint32_t size, int tiling_mode);
87 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
89 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj);
90 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
92 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
95 i915_gem_release_mmap(obj);
97 /* As we do not have an associated fence register, we will force
98 * a tiling change if we ever need to acquire one.
100 obj->fence_dirty = false;
101 obj->fence_reg = I915_FENCE_REG_NONE;
104 static int i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj);
105 static bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj);
106 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj);
107 static vm_page_t i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex);
108 static void i915_gem_reset_fences(struct drm_device *dev);
109 static void i915_gem_lowmem(void *arg);
111 static int i915_gem_obj_io(struct drm_device *dev, uint32_t handle, uint64_t data_ptr,
112 uint64_t size, uint64_t offset, enum uio_rw rw, struct drm_file *file);
114 /* some bookkeeping */
115 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
118 dev_priv->mm.object_count++;
119 dev_priv->mm.object_memory += size;
122 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
125 dev_priv->mm.object_count--;
126 dev_priv->mm.object_memory -= size;
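/*
 * Block until any in-flight GPU reset has finished. If the GPU is wedged we
 * wait (bounded to roughly 10 seconds) on the error completion so callers do
 * not race the reset handler, bumping the completion count again if the GPU
 * stays wedged.
 */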
130 i915_gem_wait_for_error(struct drm_device *dev)
132 struct drm_i915_private *dev_priv = dev->dev_private;
133 struct completion *x = &dev_priv->error_completion;
136 if (!atomic_read(&dev_priv->mm.wedged))
140 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
141 * userspace. If it takes that long, something really bad is going on and
142 * we should simply try to bail out and fail as gracefully as possible.
144 ret = wait_for_completion_interruptible_timeout(x, 10*hz);
146 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
148 } else if (ret < 0) {
152 if (atomic_read(&dev_priv->mm.wedged)) {
153 /* GPU is hung, bump the completion count to account for
154 * the token we just consumed so that we never hit zero and
155 * end up waiting upon a subsequent completion event that will never happen.
158 spin_lock(&x->wait.lock);
160 spin_unlock(&x->wait.lock);
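/*
 * Interruptible path for grabbing the GEM struct lock: give any pending GPU
 * reset a chance to finish first, then take dev->dev_struct_lock, propagating
 * failures back to the caller rather than sleeping forever.
 */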
165 int i915_mutex_lock_interruptible(struct drm_device *dev)
169 ret = i915_gem_wait_for_error(dev);
173 ret = lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_SLEEPFAIL);
177 WARN_ON(i915_verify_lists(dev));
182 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
188 i915_gem_init_ioctl(struct drm_device *dev, void *data,
189 struct drm_file *file)
191 struct drm_i915_gem_init *args = data;
193 if (drm_core_check_feature(dev, DRIVER_MODESET))
196 if (args->gtt_start >= args->gtt_end ||
197 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
200 /* GEM with user mode setting was never supported on ilk and later. */
201 if (INTEL_INFO(dev)->gen >= 5)
204 lockmgr(&dev->dev_lock, LK_EXCLUSIVE|LK_RETRY|LK_CANRECURSE);
205 i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
206 lockmgr(&dev->dev_lock, LK_RELEASE);
212 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
213 struct drm_file *file)
215 struct drm_i915_private *dev_priv = dev->dev_private;
216 struct drm_i915_gem_get_aperture *args = data;
217 struct drm_i915_gem_object *obj;
222 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
224 pinned += obj->gtt_space->size;
227 args->aper_size = dev_priv->mm.gtt_total;
228 args->aper_available_size = args->aper_size - pinned;
234 i915_gem_create(struct drm_file *file,
235 struct drm_device *dev,
239 struct drm_i915_gem_object *obj;
243 size = roundup(size, PAGE_SIZE);
247 /* Allocate the new object */
248 obj = i915_gem_alloc_object(dev, size);
253 ret = drm_gem_handle_create(file, &obj->base, &handle);
255 drm_gem_object_release(&obj->base);
256 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
257 drm_free(obj, M_DRM);
261 /* drop reference from allocate - handle holds it now */
262 drm_gem_object_unreference(&obj->base);
268 i915_gem_dumb_create(struct drm_file *file,
269 struct drm_device *dev,
270 struct drm_mode_create_dumb *args)
273 /* have to work out size/pitch and return them */
274 args->pitch = roundup2(args->width * ((args->bpp + 7) / 8), 64);
275 args->size = args->pitch * args->height;
276 return i915_gem_create(file, dev,
277 args->size, &args->handle);
280 int i915_gem_dumb_destroy(struct drm_file *file,
281 struct drm_device *dev,
285 return drm_gem_handle_delete(file, handle);
289 * Creates a new mm object and returns a handle to it.
292 i915_gem_create_ioctl(struct drm_device *dev, void *data,
293 struct drm_file *file)
295 struct drm_i915_gem_create *args = data;
297 return i915_gem_create(file, dev,
298 args->size, &args->handle);
301 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
303 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
305 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
306 obj->tiling_mode != I915_TILING_NONE;
310 * Reads data from the object referenced by handle.
312 * On error, the contents of *data are undefined.
315 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
316 struct drm_file *file)
318 struct drm_i915_gem_pread *args = data;
320 return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size,
321 args->offset, UIO_READ, file));
325 * Writes data to the object referenced by handle.
327 * On error, the contents of the buffer that were to be modified are undefined.
330 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
331 struct drm_file *file)
333 struct drm_i915_gem_pwrite *args = data;
335 return (i915_gem_obj_io(dev, args->handle, args->data_ptr, args->size,
336 args->offset, UIO_WRITE, file));
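/*
 * Check whether the GPU is wedged before queueing new work. While a reset is
 * still in progress, interruptible callers are expected to back off and
 * retry; a reset that completed but left the GPU wedged is treated as a hard
 * failure.
 */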
340 i915_gem_check_wedge(struct drm_i915_private *dev_priv,
343 if (atomic_read(&dev_priv->mm.wedged)) {
344 struct completion *x = &dev_priv->error_completion;
345 bool recovery_complete;
347 /* Give the error handler a chance to run. */
348 spin_lock(&x->wait.lock);
349 recovery_complete = x->done > 0;
350 spin_unlock(&x->wait.lock);
352 /* Non-interruptible callers can't handle -EAGAIN, hence return
353 * -EIO unconditionally for these. */
357 /* Recovery complete, but still wedged means reset failure. */
358 if (recovery_complete)
368 * Compare seqno against outstanding lazy request. Emit a request if they are equal.
372 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
376 DRM_LOCK_ASSERT(ring->dev);
379 if (seqno == ring->outstanding_lazy_request)
380 ret = i915_add_request(ring, NULL, NULL);
386 * __wait_seqno - wait until execution of seqno has finished
387 * @ring: the ring expected to report seqno
389 * @interruptible: do an interruptible wait (normally yes)
390 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
392 * Returns 0 if the seqno was found within the allotted time. Else returns the
393 * errno with remaining time filled in timeout argument.
395 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
396 bool interruptible, struct timespec *timeout)
398 drm_i915_private_t *dev_priv = ring->dev->dev_private;
399 struct timespec before, now, wait_time={1,0};
400 unsigned long timeout_jiffies;
402 bool wait_forever = true;
405 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
408 if (timeout != NULL) {
409 wait_time = *timeout;
410 wait_forever = false;
413 timeout_jiffies = timespec_to_jiffies(&wait_time);
415 if (WARN_ON(!ring->irq_get(ring)))
418 /* Record current time in case interrupted by signal, or wedged */
419 getrawmonotonic(&before);
422 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
423 atomic_read(&dev_priv->mm.wedged))
426 end = wait_event_interruptible_timeout(ring->irq_queue,
430 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
433 ret = i915_gem_check_wedge(dev_priv, interruptible);
436 } while (end == 0 && wait_forever);
438 getrawmonotonic(&now);
444 struct timespec sleep_time = timespec_sub(now, before);
445 *timeout = timespec_sub(*timeout, sleep_time);
450 case -EAGAIN: /* Wedged */
451 case -ERESTARTSYS: /* Signal */
453 case 0: /* Timeout */
455 set_normalized_timespec(timeout, 0, 0);
456 return -ETIMEDOUT; /* -ETIME on Linux */
457 default: /* Completed */
458 WARN_ON(end < 0); /* We're not aware of other errors */
464 * Waits for a sequence number to be signaled, and cleans up the
465 * request and object lists appropriately for that event.
468 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
470 struct drm_device *dev = ring->dev;
471 struct drm_i915_private *dev_priv = dev->dev_private;
474 DRM_LOCK_ASSERT(dev);
477 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
481 ret = i915_gem_check_olr(ring, seqno);
485 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
491 * Ensures that all rendering to the object has completed and the object is
492 * safe to unbind from the GTT or access from the CPU.
494 static __must_check int
495 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
498 struct intel_ring_buffer *ring = obj->ring;
502 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
506 ret = i915_wait_seqno(ring, seqno);
510 i915_gem_retire_requests_ring(ring);
512 /* Manually manage the write flush as we may have not yet
513 * retired the buffer.
515 if (obj->last_write_seqno &&
516 i915_seqno_passed(seqno, obj->last_write_seqno)) {
517 obj->last_write_seqno = 0;
518 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
524 /* A nonblocking variant of the above wait. This is a highly dangerous routine
525 * as the object state may change during this call.
527 static __must_check int
528 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
531 struct drm_device *dev = obj->base.dev;
532 struct drm_i915_private *dev_priv = dev->dev_private;
533 struct intel_ring_buffer *ring = obj->ring;
537 DRM_LOCK_ASSERT(dev);
538 BUG_ON(!dev_priv->mm.interruptible);
540 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
544 ret = i915_gem_check_wedge(dev_priv, true);
548 ret = i915_gem_check_olr(ring, seqno);
553 ret = __wait_seqno(ring, seqno, true, NULL);
556 i915_gem_retire_requests_ring(ring);
558 /* Manually manage the write flush as we may have not yet
559 * retired the buffer.
561 if (obj->last_write_seqno &&
562 i915_seqno_passed(seqno, obj->last_write_seqno)) {
563 obj->last_write_seqno = 0;
564 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
571 * Called when user space prepares to use an object with the CPU, either
572 * through the mmap ioctl's mapping or a GTT mapping.
575 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
576 struct drm_file *file)
578 struct drm_i915_gem_set_domain *args = data;
579 struct drm_i915_gem_object *obj;
580 uint32_t read_domains = args->read_domains;
581 uint32_t write_domain = args->write_domain;
584 /* Only handle setting domains to types used by the CPU. */
585 if (write_domain & I915_GEM_GPU_DOMAINS)
588 if (read_domains & I915_GEM_GPU_DOMAINS)
591 /* Having something in the write domain implies it's in the read
592 * domain, and only that read domain. Enforce that in the request.
594 if (write_domain != 0 && read_domains != write_domain)
597 ret = i915_mutex_lock_interruptible(dev);
601 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
602 if (&obj->base == NULL) {
607 /* Try to flush the object off the GPU without holding the lock.
608 * We will repeat the flush holding the lock in the normal manner
609 * to catch cases where we are gazumped.
611 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
615 if (read_domains & I915_GEM_DOMAIN_GTT) {
616 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
618 /* Silently promote "you're not bound, there was nothing to do"
619 * to success, since the client was just asking us to
620 * make sure everything was done.
625 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
629 drm_gem_object_unreference(&obj->base);
636 * Called when user space has done writes to this buffer
639 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
640 struct drm_file *file)
642 struct drm_i915_gem_sw_finish *args = data;
643 struct drm_i915_gem_object *obj;
646 ret = i915_mutex_lock_interruptible(dev);
649 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
650 if (&obj->base == NULL) {
655 /* Pinned buffers may be scanout, so flush the cache */
657 i915_gem_object_flush_cpu_write_domain(obj);
659 drm_gem_object_unreference(&obj->base);
666 * Maps the contents of an object, returning the address it is mapped into.
669 * While the mapping holds a reference on the contents of the object, it doesn't
670 * imply a ref on the object itself.
673 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
674 struct drm_file *file)
676 struct drm_i915_gem_mmap *args = data;
677 struct drm_gem_object *obj;
678 struct proc *p = curproc;
679 vm_map_t map = &p->p_vmspace->vm_map;
684 obj = drm_gem_object_lookup(dev, file, args->handle);
691 size = round_page(args->size);
693 if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
701 vm_object_hold(obj->vm_obj);
702 vm_object_reference_locked(obj->vm_obj);
703 vm_object_drop(obj->vm_obj);
704 rv = vm_map_find(map, obj->vm_obj, NULL,
705 args->offset, &addr, args->size,
706 PAGE_SIZE, /* align */
708 VM_MAPTYPE_NORMAL, /* maptype */
709 VM_PROT_READ | VM_PROT_WRITE, /* prot */
710 VM_PROT_READ | VM_PROT_WRITE, /* max */
711 MAP_SHARED /* cow */);
712 if (rv != KERN_SUCCESS) {
713 vm_object_deallocate(obj->vm_obj);
714 error = -vm_mmap_to_errno(rv);
716 args->addr_ptr = (uint64_t)addr;
719 drm_gem_object_unreference(obj);
724 * i915_gem_release_mmap - remove physical page mappings
725 * @obj: obj in question
727 * Preserve the reservation of the mmapping with the DRM core code, but
728 * relinquish ownership of the pages back to the system.
730 * It is vital that we remove the page mapping if we have mapped a tiled
731 * object through the GTT and then lose the fence register due to
732 * resource pressure. Similarly if the object has been moved out of the
733 * aperture, then pages mapped into userspace must be revoked. Removing the
734 * mapping will then trigger a page fault on the next user access, allowing
735 * fixup by i915_gem_fault().
738 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
744 if (!obj->fault_mappable)
747 devobj = cdev_pager_lookup(obj);
748 if (devobj != NULL) {
749 page_count = OFF_TO_IDX(obj->base.size);
751 VM_OBJECT_LOCK(devobj);
752 for (i = 0; i < page_count; i++) {
753 m = vm_page_lookup_busy_wait(devobj, i, TRUE, "915unm");
756 cdev_pager_free_page(devobj, m);
758 VM_OBJECT_UNLOCK(devobj);
759 vm_object_deallocate(devobj);
762 obj->fault_mappable = false;
766 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
770 if (INTEL_INFO(dev)->gen >= 4 ||
771 tiling_mode == I915_TILING_NONE)
774 /* Previous chips need a power-of-two fence region when tiling */
775 if (INTEL_INFO(dev)->gen == 3)
776 gtt_size = 1024*1024;
780 while (gtt_size < size)
787 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
788 * @obj: object to check
790 * Return the required GTT alignment for an object, taking into account
791 * potential fence register mapping.
794 i915_gem_get_gtt_alignment(struct drm_device *dev,
800 * Minimum alignment is 4k (GTT page size), but might be greater
801 * if a fence register is needed for the object.
803 if (INTEL_INFO(dev)->gen >= 4 ||
804 tiling_mode == I915_TILING_NONE)
808 * Previous chips need to be aligned to the size of the smallest
809 * fence register that can contain the object.
811 return i915_gem_get_gtt_size(dev, size, tiling_mode);
815 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an unfenced object
818 * @size: size of the object
819 * @tiling_mode: tiling mode of the object
821 * Return the required GTT alignment for an object, only taking into account
822 * unfenced tiled surface requirements.
825 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
830 * Minimum alignment is 4k (GTT page size) for sane hw.
832 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
833 tiling_mode == I915_TILING_NONE)
836 /* Previous hardware however needs to be aligned to a power-of-two
837 * tile height. The simplest method for determining this is to reuse
838 * the power-of-two object size.
840 return i915_gem_get_gtt_size(dev, size, tiling_mode);
844 i915_gem_mmap_gtt(struct drm_file *file,
845 struct drm_device *dev,
849 struct drm_i915_private *dev_priv = dev->dev_private;
850 struct drm_i915_gem_object *obj;
853 ret = i915_mutex_lock_interruptible(dev);
857 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
858 if (&obj->base == NULL) {
863 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
868 if (obj->madv != I915_MADV_WILLNEED) {
869 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
874 ret = drm_gem_create_mmap_offset(&obj->base);
878 *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) |
881 drm_gem_object_unreference(&obj->base);
888 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
890 * @data: GTT mapping ioctl data
891 * @file: GEM object info
893 * Simply returns the fake offset to userspace so it can mmap it.
894 * The mmap call will end up in drm_gem_mmap(), which will set things
895 * up so we can get faults in the handler above.
897 * The fault handler will take care of binding the object into the GTT
898 * (since it may have been evicted to make room for something), allocating
899 * a fence register, and mapping the appropriate aperture address into userspace.
903 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
904 struct drm_file *file)
906 struct drm_i915_gem_mmap_gtt *args = data;
908 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
911 /* Immediately discard the backing storage */
913 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
917 vm_obj = obj->base.vm_obj;
918 VM_OBJECT_LOCK(vm_obj);
919 vm_object_page_remove(vm_obj, 0, 0, false);
920 VM_OBJECT_UNLOCK(vm_obj);
921 obj->madv = __I915_MADV_PURGED;
925 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
927 return obj->madv == I915_MADV_DONTNEED;
930 static inline void vm_page_reference(vm_page_t m)
932 vm_page_flag_set(m, PG_REFERENCED);
936 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
941 BUG_ON(obj->madv == __I915_MADV_PURGED);
943 if (obj->tiling_mode != I915_TILING_NONE)
944 i915_gem_object_save_bit_17_swizzle(obj);
945 if (obj->madv == I915_MADV_DONTNEED)
947 page_count = obj->base.size / PAGE_SIZE;
948 VM_OBJECT_LOCK(obj->base.vm_obj);
949 #if GEM_PARANOID_CHECK_GTT
950 i915_gem_assert_pages_not_mapped(obj->base.dev, obj->pages, page_count);
952 for (i = 0; i < page_count; i++) {
956 if (obj->madv == I915_MADV_WILLNEED)
957 vm_page_reference(m);
958 vm_page_busy_wait(obj->pages[i], FALSE, "i915gem");
959 vm_page_unwire(obj->pages[i], 1);
960 vm_page_wakeup(obj->pages[i]);
962 VM_OBJECT_UNLOCK(obj->base.vm_obj);
964 drm_free(obj->pages, M_DRM);
969 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
972 struct drm_device *dev;
975 int page_count, i, j;
978 KASSERT(obj->pages == NULL, ("Obj already has pages"));
979 page_count = obj->base.size / PAGE_SIZE;
980 obj->pages = kmalloc(page_count * sizeof(vm_page_t), M_DRM,
982 vm_obj = obj->base.vm_obj;
983 VM_OBJECT_LOCK(vm_obj);
984 for (i = 0; i < page_count; i++) {
985 if ((obj->pages[i] = i915_gem_wire_page(vm_obj, i)) == NULL)
988 VM_OBJECT_UNLOCK(vm_obj);
989 if (i915_gem_object_needs_bit17_swizzle(obj))
990 i915_gem_object_do_bit_17_swizzle(obj);
994 for (j = 0; j < i; j++) {
996 vm_page_busy_wait(m, FALSE, "i915gem");
997 vm_page_unwire(m, 0);
1000 VM_OBJECT_UNLOCK(vm_obj);
1001 drm_free(obj->pages, M_DRM);
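/*
 * Mark an object as in use by the GPU: take a reference, move it onto the
 * ring's active list and record the seqno it was last read with (and, if it
 * has fenced GPU access, last fenced with).
 */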
1007 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1008 struct intel_ring_buffer *ring)
1010 struct drm_device *dev = obj->base.dev;
1011 struct drm_i915_private *dev_priv = dev->dev_private;
1012 u32 seqno = intel_ring_get_seqno(ring);
1014 BUG_ON(ring == NULL);
1017 /* Add a reference if we're newly entering the active list. */
1019 drm_gem_object_reference(&obj->base);
1023 /* Move from whatever list we were on to the tail of execution. */
1024 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1025 list_move_tail(&obj->ring_list, &ring->active_list);
1027 obj->last_read_seqno = seqno;
1029 if (obj->fenced_gpu_access) {
1030 obj->last_fenced_seqno = seqno;
1032 /* Bump MRU to take account of the delayed flush */
1033 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1034 struct drm_i915_fence_reg *reg;
1036 reg = &dev_priv->fence_regs[obj->fence_reg];
1037 list_move_tail(®->lru_list,
1038 &dev_priv->mm.fence_list);
1044 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1046 struct drm_device *dev = obj->base.dev;
1047 struct drm_i915_private *dev_priv = dev->dev_private;
1049 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1050 BUG_ON(!obj->active);
1052 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1054 list_del_init(&obj->ring_list);
1057 obj->last_read_seqno = 0;
1058 obj->last_write_seqno = 0;
1059 obj->base.write_domain = 0;
1061 obj->last_fenced_seqno = 0;
1062 obj->fenced_gpu_access = false;
1065 drm_gem_object_unreference(&obj->base);
1067 WARN_ON(i915_verify_lists(dev));
1071 i915_gem_handle_seqno_wrap(struct drm_device *dev)
1073 struct drm_i915_private *dev_priv = dev->dev_private;
1074 struct intel_ring_buffer *ring;
1077 /* The hardware uses various monotonic 32-bit counters; if we
1078 * detect that they will wrap around, we need to idle the GPU
1079 * and reset those counters.
1082 for_each_ring(ring, dev_priv, i) {
1083 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1084 ret |= ring->sync_seqno[j] != 0;
1089 ret = i915_gpu_idle(dev);
1093 i915_gem_retire_requests(dev);
1094 for_each_ring(ring, dev_priv, i) {
1095 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1096 ring->sync_seqno[j] = 0;
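/*
 * Hand out the next seqno, skipping 0 (reserved for "no seqno") and idling
 * the GPU first when the 32-bit counter is about to wrap.
 */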
1103 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1105 struct drm_i915_private *dev_priv = dev->dev_private;
1107 /* reserve 0 for non-seqno */
1108 if (dev_priv->next_seqno == 0) {
1109 int ret = i915_gem_handle_seqno_wrap(dev);
1113 dev_priv->next_seqno = 1;
1116 *seqno = dev_priv->next_seqno++;
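/*
 * Emit a request onto the ring: flush outstanding GPU caches, have the ring
 * write its seqno, then queue a request structure so retirement, hangcheck
 * and per-file throttling can track it.
 */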
1121 i915_add_request(struct intel_ring_buffer *ring,
1122 struct drm_file *file,
1125 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1126 struct drm_i915_gem_request *request;
1127 u32 request_ring_position;
1132 * Emit any outstanding flushes - execbuf can fail to emit the flush
1133 * after having emitted the batchbuffer command. Hence we need to fix
1134 * things up similar to emitting the lazy request. The difference here
1135 * is that the flush _must_ happen before the next request, no matter what.
1138 ret = intel_ring_flush_all_caches(ring);
1142 request = kmalloc(sizeof(*request), M_DRM, M_WAITOK | M_ZERO);
1143 if (request == NULL)
1147 /* Record the position of the start of the request so that
1148 * should we detect the updated seqno part-way through the
1149 * GPU processing the request, we never over-estimate the
1150 * position of the head.
1152 request_ring_position = intel_ring_get_tail(ring);
1154 ret = ring->add_request(ring);
1156 kfree(request, M_DRM);
1160 request->seqno = intel_ring_get_seqno(ring);
1161 request->ring = ring;
1162 request->tail = request_ring_position;
1163 request->emitted_jiffies = jiffies;
1164 was_empty = list_empty(&ring->request_list);
1165 list_add_tail(&request->list, &ring->request_list);
1166 request->file_priv = NULL;
1169 struct drm_i915_file_private *file_priv = file->driver_priv;
1171 spin_lock(&file_priv->mm.lock);
1172 request->file_priv = file_priv;
1173 list_add_tail(&request->client_list,
1174 &file_priv->mm.request_list);
1175 spin_unlock(&file_priv->mm.lock);
1178 ring->outstanding_lazy_request = 0;
1180 if (!dev_priv->mm.suspended) {
1181 if (i915_enable_hangcheck) {
1182 mod_timer(&dev_priv->hangcheck_timer,
1183 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1186 queue_delayed_work(dev_priv->wq,
1187 &dev_priv->mm.retire_work,
1188 round_jiffies_up_relative(hz));
1189 intel_mark_busy(dev_priv->dev);
1194 *out_seqno = request->seqno;
1199 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1201 struct drm_i915_file_private *file_priv = request->file_priv;
1206 spin_lock(&file_priv->mm.lock);
1207 if (request->file_priv) {
1208 list_del(&request->client_list);
1209 request->file_priv = NULL;
1211 spin_unlock(&file_priv->mm.lock);
1214 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1215 struct intel_ring_buffer *ring)
1217 while (!list_empty(&ring->request_list)) {
1218 struct drm_i915_gem_request *request;
1220 request = list_first_entry(&ring->request_list,
1221 struct drm_i915_gem_request,
1224 list_del(&request->list);
1225 i915_gem_request_remove_from_client(request);
1226 drm_free(request, M_DRM);
1229 while (!list_empty(&ring->active_list)) {
1230 struct drm_i915_gem_object *obj;
1232 obj = list_first_entry(&ring->active_list,
1233 struct drm_i915_gem_object,
1236 i915_gem_object_move_to_inactive(obj);
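/*
 * After a GPU reset the fence registers contain stale state: clear the
 * hardware registers, notify any objects that were using them and rebuild
 * the fence LRU.
 */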
1240 static void i915_gem_reset_fences(struct drm_device *dev)
1242 struct drm_i915_private *dev_priv = dev->dev_private;
1245 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1246 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1248 i915_gem_write_fence(dev, i, NULL);
1251 i915_gem_object_fence_lost(reg->obj);
1255 INIT_LIST_HEAD(®->lru_list);
1258 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1261 void i915_gem_reset(struct drm_device *dev)
1263 struct drm_i915_private *dev_priv = dev->dev_private;
1264 struct drm_i915_gem_object *obj;
1265 struct intel_ring_buffer *ring;
1268 for_each_ring(ring, dev_priv, i)
1269 i915_gem_reset_ring_lists(dev_priv, ring);
1271 /* Move everything out of the GPU domains to ensure we do any
1272 * necessary invalidation upon reuse.
1274 list_for_each_entry(obj,
1275 &dev_priv->mm.inactive_list,
1278 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1281 /* The fence registers are invalidated so clear them out */
1282 i915_gem_reset_fences(dev);
1286 * This function clears the request list as sequence numbers are passed.
1289 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1293 if (list_empty(&ring->request_list))
1296 WARN_ON(i915_verify_lists(ring->dev));
1298 seqno = ring->get_seqno(ring, true);
1300 while (!list_empty(&ring->request_list)) {
1301 struct drm_i915_gem_request *request;
1303 request = list_first_entry(&ring->request_list,
1304 struct drm_i915_gem_request,
1307 if (!i915_seqno_passed(seqno, request->seqno))
1310 /* We know the GPU must have read the request to have
1311 * sent us the seqno + interrupt, so use the position
1312 * of the tail of the request to update the last known position of the GPU head.
1315 ring->last_retired_head = request->tail;
1317 list_del(&request->list);
1318 i915_gem_request_remove_from_client(request);
1319 kfree(request, M_DRM);
1322 /* Move any buffers on the active list that are no longer referenced
1323 * by the ringbuffer to the flushing/inactive lists as appropriate.
1325 while (!list_empty(&ring->active_list)) {
1326 struct drm_i915_gem_object *obj;
1328 obj = list_first_entry(&ring->active_list,
1329 struct drm_i915_gem_object,
1332 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
1335 i915_gem_object_move_to_inactive(obj);
1338 if (unlikely(ring->trace_irq_seqno &&
1339 i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1340 ring->irq_put(ring);
1341 ring->trace_irq_seqno = 0;
1347 i915_gem_retire_requests(struct drm_device *dev)
1349 drm_i915_private_t *dev_priv = dev->dev_private;
1350 struct intel_ring_buffer *ring;
1353 for_each_ring(ring, dev_priv, i)
1354 i915_gem_retire_requests_ring(ring);
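/*
 * Periodic retire work: retire completed requests on every ring, flush any
 * dirty GPU caches with a fresh request, and re-arm itself while the device
 * remains busy.
 */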
1358 i915_gem_retire_work_handler(struct work_struct *work)
1360 drm_i915_private_t *dev_priv;
1361 struct drm_device *dev;
1362 struct intel_ring_buffer *ring;
1366 dev_priv = container_of(work, drm_i915_private_t,
1367 mm.retire_work.work);
1368 dev = dev_priv->dev;
1370 /* Come back later if the device is busy... */
1371 if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT)) {
1372 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
1373 round_jiffies_up_relative(hz));
1377 i915_gem_retire_requests(dev);
1379 /* Send a periodic flush down the ring so we don't hold onto GEM
1380 * objects indefinitely.
1383 for_each_ring(ring, dev_priv, i) {
1384 if (ring->gpu_caches_dirty)
1385 i915_add_request(ring, NULL, NULL);
1387 idle &= list_empty(&ring->request_list);
1390 if (!dev_priv->mm.suspended && !idle)
1391 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
1392 round_jiffies_up_relative(hz));
1394 intel_mark_idle(dev);
1399 * Ensures that an object will eventually get non-busy by flushing any required
1400 * write domains, emitting any outstanding lazy request and retiring any
1401 * completed requests.
1404 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
1409 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
1413 i915_gem_retire_requests_ring(obj->ring);
1420 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
1421 * @DRM_IOCTL_ARGS: standard ioctl arguments
1423 * Returns 0 if successful, else an error is returned with the remaining time in
1424 * the timeout parameter.
1425 * -ETIME: object is still busy after timeout
1426 * -ERESTARTSYS: signal interrupted the wait
1427 * -ENOENT: object doesn't exist
1428 * Also possible, but rare:
1429 * -EAGAIN: GPU wedged
1431 * -ENODEV: Internal IRQ fail
1432 * -E?: The add request failed
1434 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
1435 * non-zero timeout parameter the wait ioctl will wait for the given number of
1436 * nanoseconds on an object becoming unbusy. Since the wait itself does so
1437 * without holding struct_mutex the object may become re-busied before this
1438 * function completes. A similar but shorter race condition exists in the busy ioctl.
1442 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
1444 struct drm_i915_gem_wait *args = data;
1445 struct drm_i915_gem_object *obj;
1446 struct intel_ring_buffer *ring = NULL;
1447 struct timespec timeout_stack, *timeout = NULL;
1451 if (args->timeout_ns >= 0) {
1452 timeout_stack = ns_to_timespec(args->timeout_ns);
1453 timeout = &timeout_stack;
1456 ret = i915_mutex_lock_interruptible(dev);
1460 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
1461 if (&obj->base == NULL) {
1466 /* Need to make sure the object gets inactive eventually. */
1467 ret = i915_gem_object_flush_active(obj);
1472 seqno = obj->last_read_seqno;
1479 /* Do this after OLR check to make sure we make forward progress polling
1480 * on this IOCTL with a 0 timeout (like busy ioctl)
1482 if (!args->timeout_ns) {
1487 drm_gem_object_unreference(&obj->base);
1490 ret = __wait_seqno(ring, seqno, true, timeout);
1492 WARN_ON(!timespec_valid(timeout));
1493 args->timeout_ns = timespec_to_ns(timeout);
1498 drm_gem_object_unreference(&obj->base);
1504 * i915_gem_object_sync - sync an object to a ring.
1506 * @obj: object which may be in use on another ring.
1507 * @to: ring we wish to use the object on. May be NULL.
1509 * This code is meant to abstract object synchronization with the GPU.
1510 * Calling with NULL implies synchronizing the object with the CPU
1511 * rather than a particular GPU ring.
1513 * Returns 0 if successful, else propagates up the lower layer error.
1516 i915_gem_object_sync(struct drm_i915_gem_object *obj,
1517 struct intel_ring_buffer *to)
1519 struct intel_ring_buffer *from = obj->ring;
1523 if (from == NULL || to == from)
1526 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
1527 return i915_gem_object_wait_rendering(obj, false);
1529 idx = intel_ring_sync_index(from, to);
1531 seqno = obj->last_read_seqno;
1532 if (seqno <= from->sync_seqno[idx])
1535 ret = i915_gem_check_olr(obj->ring, seqno);
1539 ret = to->sync_to(to, from, seqno);
1541 /* We use last_read_seqno because sync_to()
1542 * might have just caused seqno wrap under the radar.
1545 from->sync_seqno[idx] = obj->last_read_seqno;
1550 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
1552 u32 old_write_domain, old_read_domains;
1554 /* Act as a barrier for all accesses through the GTT */
1557 /* Force a pagefault for domain tracking on next user access */
1558 i915_gem_release_mmap(obj);
1560 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
1563 old_read_domains = obj->base.read_domains;
1564 old_write_domain = obj->base.write_domain;
1566 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
1567 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
1572 * Unbinds an object from the GTT aperture.
1575 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
1577 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1580 if (obj->gtt_space == NULL)
1586 ret = i915_gem_object_finish_gpu(obj);
1589 /* Continue on if we fail due to EIO; the GPU is hung, so we
1590 * should be safe, and we need to clean up or else we might
1591 * cause memory corruption through use-after-free.
1594 i915_gem_object_finish_gtt(obj);
1596 /* Move the object to the CPU domain to ensure that
1597 * any possible CPU writes while it's not in the GTT
1598 * are flushed when we go to remap it.
1601 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1602 if (ret == -ERESTART || ret == -EINTR)
1605 /* In the event of a disaster, abandon all caches and
1606 * hope for the best.
1608 i915_gem_clflush_object(obj);
1609 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1612 /* release the fence reg _after_ flushing */
1613 ret = i915_gem_object_put_fence(obj);
1617 if (obj->has_global_gtt_mapping)
1618 i915_gem_gtt_unbind_object(obj);
1619 if (obj->has_aliasing_ppgtt_mapping) {
1620 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
1621 obj->has_aliasing_ppgtt_mapping = 0;
1623 i915_gem_gtt_finish_object(obj);
1625 i915_gem_object_put_pages_gtt(obj);
1627 list_del_init(&obj->gtt_list);
1628 list_del_init(&obj->mm_list);
1629 /* Avoid an unnecessary call to unbind on rebind. */
1630 obj->map_and_fenceable = true;
1632 drm_mm_put_block(obj->gtt_space);
1633 obj->gtt_space = NULL;
1634 obj->gtt_offset = 0;
1636 if (i915_gem_object_is_purgeable(obj))
1637 i915_gem_object_truncate(obj);
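/*
 * Wait for the GPU to go idle: switch each ring back to the default context
 * and wait for its outstanding requests to complete.
 */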
1642 int i915_gpu_idle(struct drm_device *dev)
1644 drm_i915_private_t *dev_priv = dev->dev_private;
1645 struct intel_ring_buffer *ring;
1648 /* Flush everything onto the inactive list. */
1649 for_each_ring(ring, dev_priv, i) {
1650 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
1654 ret = intel_ring_idle(ring);
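/*
 * The *_write_fence_reg() helpers below encode an object's GTT range, stride
 * and tiling mode into the generation-specific fence register layout and
 * write it to the hardware; callers pass a NULL object to clear a register.
 */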
1662 static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
1663 struct drm_i915_gem_object *obj)
1665 drm_i915_private_t *dev_priv = dev->dev_private;
1669 u32 size = obj->gtt_space->size;
1671 val = (uint64_t)((obj->gtt_offset + size - 4096) &
1673 val |= obj->gtt_offset & 0xfffff000;
1674 val |= (uint64_t)((obj->stride / 128) - 1) <<
1675 SANDYBRIDGE_FENCE_PITCH_SHIFT;
1677 if (obj->tiling_mode == I915_TILING_Y)
1678 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1679 val |= I965_FENCE_REG_VALID;
1683 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
1684 POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
1687 static void i965_write_fence_reg(struct drm_device *dev, int reg,
1688 struct drm_i915_gem_object *obj)
1690 drm_i915_private_t *dev_priv = dev->dev_private;
1694 u32 size = obj->gtt_space->size;
1696 val = (uint64_t)((obj->gtt_offset + size - 4096) &
1698 val |= obj->gtt_offset & 0xfffff000;
1699 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
1700 if (obj->tiling_mode == I915_TILING_Y)
1701 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1702 val |= I965_FENCE_REG_VALID;
1706 I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
1707 POSTING_READ(FENCE_REG_965_0 + reg * 8);
1710 static void i915_write_fence_reg(struct drm_device *dev, int reg,
1711 struct drm_i915_gem_object *obj)
1713 drm_i915_private_t *dev_priv = dev->dev_private;
1717 u32 size = obj->gtt_space->size;
1721 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
1722 (size & -size) != size ||
1723 (obj->gtt_offset & (size - 1)),
1724 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
1725 obj->gtt_offset, obj->map_and_fenceable, size);
1727 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
1732 /* Note: pitch better be a power of two tile widths */
1733 pitch_val = obj->stride / tile_width;
1734 pitch_val = ffs(pitch_val) - 1;
1736 val = obj->gtt_offset;
1737 if (obj->tiling_mode == I915_TILING_Y)
1738 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1739 val |= I915_FENCE_SIZE_BITS(size);
1740 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1741 val |= I830_FENCE_REG_VALID;
1746 reg = FENCE_REG_830_0 + reg * 4;
1748 reg = FENCE_REG_945_8 + (reg - 8) * 4;
1750 I915_WRITE(reg, val);
1754 static void i830_write_fence_reg(struct drm_device *dev, int reg,
1755 struct drm_i915_gem_object *obj)
1757 drm_i915_private_t *dev_priv = dev->dev_private;
1761 u32 size = obj->gtt_space->size;
1764 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
1765 (size & -size) != size ||
1766 (obj->gtt_offset & (size - 1)),
1767 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
1768 obj->gtt_offset, size);
1770 pitch_val = obj->stride / 128;
1771 pitch_val = ffs(pitch_val) - 1;
1773 val = obj->gtt_offset;
1774 if (obj->tiling_mode == I915_TILING_Y)
1775 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1776 val |= I830_FENCE_SIZE_BITS(size);
1777 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1778 val |= I830_FENCE_REG_VALID;
1782 I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
1783 POSTING_READ(FENCE_REG_830_0 + reg * 4);
1786 static void i915_gem_write_fence(struct drm_device *dev, int reg,
1787 struct drm_i915_gem_object *obj)
1789 switch (INTEL_INFO(dev)->gen) {
1791 case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
1793 case 4: i965_write_fence_reg(dev, reg, obj); break;
1794 case 3: i915_write_fence_reg(dev, reg, obj); break;
1795 case 2: i830_write_fence_reg(dev, reg, obj); break;
1800 static inline int fence_number(struct drm_i915_private *dev_priv,
1801 struct drm_i915_fence_reg *fence)
1803 return fence - dev_priv->fence_regs;
1806 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
1807 struct drm_i915_fence_reg *fence,
1810 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1811 int reg = fence_number(dev_priv, fence);
1813 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
1816 obj->fence_reg = reg;
1818 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
1820 obj->fence_reg = I915_FENCE_REG_NONE;
1822 list_del_init(&fence->lru_list);
1827 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
1829 if (obj->last_fenced_seqno) {
1830 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
1834 obj->last_fenced_seqno = 0;
1837 /* Ensure that all CPU reads are completed before installing a fence
1838 * and all writes before removing the fence.
1840 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
1843 obj->fenced_gpu_access = false;
1848 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
1850 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1853 ret = i915_gem_object_flush_fence(obj);
1857 if (obj->fence_reg == I915_FENCE_REG_NONE)
1860 i915_gem_object_update_fence(obj,
1861 &dev_priv->fence_regs[obj->fence_reg],
1863 i915_gem_object_fence_lost(obj);
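/*
 * Pick a fence register for reuse: prefer a completely free register,
 * otherwise steal the least-recently-used register that is not pinned.
 */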
1868 static struct drm_i915_fence_reg *
1869 i915_find_fence_reg(struct drm_device *dev)
1871 struct drm_i915_private *dev_priv = dev->dev_private;
1872 struct drm_i915_fence_reg *reg, *avail;
1875 /* First try to find a free reg */
1877 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
1878 reg = &dev_priv->fence_regs[i];
1882 if (!reg->pin_count)
1889 /* None available, try to steal one or wait for a user to finish */
1890 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
1901 * i915_gem_object_get_fence - set up fencing for an object
1902 * @obj: object to map through a fence reg
1904 * When mapping objects through the GTT, userspace wants to be able to write
1905 * to them without having to worry about swizzling if the object is tiled.
1906 * This function walks the fence regs looking for a free one for @obj,
1907 * stealing one if it can't find any.
1909 * It then sets up the reg based on the object's properties: address, pitch
1910 * and tiling format.
1912 * For an untiled surface, this removes any existing fence.
1915 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
1917 struct drm_device *dev = obj->base.dev;
1918 struct drm_i915_private *dev_priv = dev->dev_private;
1919 bool enable = obj->tiling_mode != I915_TILING_NONE;
1920 struct drm_i915_fence_reg *reg;
1923 /* Have we updated the tiling parameters upon the object and so
1924 * will need to serialise the write to the associated fence register?
1926 if (obj->fence_dirty) {
1927 ret = i915_gem_object_flush_fence(obj);
1932 /* Just update our place in the LRU if our fence is getting reused. */
1933 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1934 reg = &dev_priv->fence_regs[obj->fence_reg];
1935 if (!obj->fence_dirty) {
1936 list_move_tail(®->lru_list,
1937 &dev_priv->mm.fence_list);
1940 } else if (enable) {
1941 reg = i915_find_fence_reg(dev);
1946 struct drm_i915_gem_object *old = reg->obj;
1948 ret = i915_gem_object_flush_fence(old);
1952 i915_gem_object_fence_lost(old);
1957 i915_gem_object_update_fence(obj, reg, enable);
1958 obj->fence_dirty = false;
1964 * Finds free space in the GTT aperture and binds the object there.
1967 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
1969 bool map_and_fenceable,
1972 struct drm_device *dev = obj->base.dev;
1973 drm_i915_private_t *dev_priv = dev->dev_private;
1974 struct drm_mm_node *free_space;
1975 uint32_t size, fence_size, fence_alignment, unfenced_alignment;
1976 bool mappable, fenceable;
1979 if (obj->madv != I915_MADV_WILLNEED) {
1980 DRM_ERROR("Attempting to bind a purgeable object\n");
1984 fence_size = i915_gem_get_gtt_size(dev, obj->base.size,
1986 fence_alignment = i915_gem_get_gtt_alignment(dev, obj->base.size,
1988 unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(dev,
1989 obj->base.size, obj->tiling_mode);
1991 alignment = map_and_fenceable ? fence_alignment :
1993 if (map_and_fenceable && (alignment & (fence_alignment - 1)) != 0) {
1994 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1998 size = map_and_fenceable ? fence_size : obj->base.size;
2000 /* If the object is bigger than the entire aperture, reject it early
2001 * before evicting everything in a vain attempt to find space.
2003 if (obj->base.size > (map_and_fenceable ?
2004 dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2006 "Attempting to bind an object larger than the aperture\n");
2011 if (map_and_fenceable)
2012 free_space = drm_mm_search_free_in_range(
2013 &dev_priv->mm.gtt_space, size, alignment, 0,
2014 dev_priv->mm.gtt_mappable_end, 0);
2016 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2017 size, alignment, 0);
2018 if (free_space != NULL) {
2020 if (map_and_fenceable)
2021 obj->gtt_space = drm_mm_get_block_range_generic(
2022 free_space, size, alignment, color, 0,
2023 dev_priv->mm.gtt_mappable_end, 1);
2025 obj->gtt_space = drm_mm_get_block_generic(free_space,
2026 size, alignment, color, 1);
2028 if (obj->gtt_space == NULL) {
2029 ret = i915_gem_evict_something(dev, size, alignment,
2039 * NOTE: i915_gem_object_get_pages_gtt() cannot
2040 * return ENOMEM, since we used VM_ALLOC_RETRY.
2042 ret = i915_gem_object_get_pages_gtt(obj, 0);
2044 drm_mm_put_block(obj->gtt_space);
2045 obj->gtt_space = NULL;
2049 i915_gem_gtt_bind_object(obj, obj->cache_level);
2051 i915_gem_object_put_pages_gtt(obj);
2052 drm_mm_put_block(obj->gtt_space);
2053 obj->gtt_space = NULL;
2054 if (i915_gem_evict_everything(dev))
2059 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2060 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2062 obj->gtt_offset = obj->gtt_space->start;
2065 obj->gtt_space->size == fence_size &&
2066 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2069 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2070 obj->map_and_fenceable = mappable && fenceable;
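/*
 * Flush an object's pages out of the CPU caches so that subsequent GTT or
 * GPU access sees the latest data; snooped (LLC-cached) objects are skipped.
 */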
2076 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2079 /* If we don't have a page list set up, then we're not pinned
2080 * to GPU, and we can ignore the cache flush because it'll happen
2081 * again at bind time.
2083 if (obj->pages == NULL)
2086 /* If the GPU is snooping the contents of the CPU cache,
2087 * we do not need to manually clear the CPU cache lines. However,
2088 * the caches are only snooped when the render cache is
2089 * flushed/invalidated. As we always have to emit invalidations
2090 * and flushes when moving into and out of the RENDER domain, correct
2091 * snooping behaviour occurs naturally as the result of our domain tracking.
2094 if (obj->cache_level != I915_CACHE_NONE)
2097 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2100 /** Flushes the GTT write domain for the object if it's dirty. */
2102 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2104 uint32_t old_write_domain;
2106 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2109 /* No actual flushing is required for the GTT write domain. Writes
2110 * to it immediately go to main memory as far as we know, so there's
2111 * no chipset flush. It also doesn't land in render cache.
2113 * However, we do have to enforce the order so that all writes through
2114 * the GTT land before any writes to the device, such as updates to the GATT itself.
2119 old_write_domain = obj->base.write_domain;
2120 obj->base.write_domain = 0;
2123 /** Flushes the CPU write domain for the object if it's dirty. */
2125 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2127 uint32_t old_write_domain;
2129 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2132 i915_gem_clflush_object(obj);
2133 intel_gtt_chipset_flush();
2134 old_write_domain = obj->base.write_domain;
2135 obj->base.write_domain = 0;
2139 * Moves a single object to the GTT read, and possibly write domain.
2141 * This function returns when the move is complete, including waiting on flushes to occur.
2145 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2147 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2148 uint32_t old_write_domain, old_read_domains;
2151 /* Not valid to be called on unbound objects. */
2152 if (obj->gtt_space == NULL)
2155 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2158 ret = i915_gem_object_wait_rendering(obj, !write);
2162 i915_gem_object_flush_cpu_write_domain(obj);
2164 old_write_domain = obj->base.write_domain;
2165 old_read_domains = obj->base.read_domains;
2167 /* It should now be out of any other write domains, and we can update
2168 * the domain values for our changes.
2170 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2171 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2173 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2174 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2178 /* And bump the LRU for this access */
2179 if (i915_gem_object_is_inactive(obj))
2180 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
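/*
 * Change an object's cache level. A bound object is flushed from the GPU,
 * has its GTT/PPGTT entries rewritten with the new caching bits and, before
 * SandyBridge, loses its fence; switching to uncached also moves the object
 * into the CPU domain.
 */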
2185 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2186 enum i915_cache_level cache_level)
2188 struct drm_device *dev = obj->base.dev;
2189 drm_i915_private_t *dev_priv = dev->dev_private;
2192 if (obj->cache_level == cache_level)
2195 if (obj->pin_count) {
2196 DRM_DEBUG("can not change the cache level of pinned objects\n");
2200 if (obj->gtt_space) {
2201 ret = i915_gem_object_finish_gpu(obj);
2205 i915_gem_object_finish_gtt(obj);
2207 /* Before SandyBridge, you could not use tiling or fence
2208 * registers with snooped memory, so relinquish any fences
2209 * currently pointing to our region in the aperture.
2211 if (INTEL_INFO(obj->base.dev)->gen < 6) {
2212 ret = i915_gem_object_put_fence(obj);
2217 if (obj->has_global_gtt_mapping)
2218 i915_gem_gtt_bind_object(obj, cache_level);
2219 if (obj->has_aliasing_ppgtt_mapping)
2220 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2224 if (cache_level == I915_CACHE_NONE) {
2225 u32 old_read_domains, old_write_domain;
2227 /* If we're coming from LLC cached, then we haven't
2228 * actually been tracking whether the data is in the
2229 * CPU cache or not, since we only allow one bit set
2230 * in obj->write_domain and have been skipping the clflushes.
2231 * Just set it to the CPU cache for now.
2233 KASSERT((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) == 0,
2234 ("obj %p in CPU write domain", obj));
2235 KASSERT((obj->base.read_domains & ~I915_GEM_DOMAIN_CPU) == 0,
2236 ("obj %p in CPU read domain", obj));
2238 old_read_domains = obj->base.read_domains;
2239 old_write_domain = obj->base.write_domain;
2241 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2242 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2246 obj->cache_level = cache_level;
2250 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
2251 struct drm_file *file)
2253 struct drm_i915_gem_caching *args = data;
2254 struct drm_i915_gem_object *obj;
2257 ret = i915_mutex_lock_interruptible(dev);
2261 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2262 if (&obj->base == NULL) {
2267 args->caching = obj->cache_level != I915_CACHE_NONE;
2269 drm_gem_object_unreference(&obj->base);
2275 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
2276 struct drm_file *file)
2278 struct drm_i915_gem_caching *args = data;
2279 struct drm_i915_gem_object *obj;
2280 enum i915_cache_level level;
2283 switch (args->caching) {
2284 case I915_CACHING_NONE:
2285 level = I915_CACHE_NONE;
2287 case I915_CACHING_CACHED:
2288 level = I915_CACHE_LLC;
2294 ret = i915_mutex_lock_interruptible(dev);
2298 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2299 if (&obj->base == NULL) {
2304 ret = i915_gem_object_set_cache_level(obj, level);
2306 drm_gem_object_unreference(&obj->base);
2313 * Prepare buffer for display plane (scanout, cursors, etc).
2314 * Can be called from an uninterruptible phase (modesetting) and allows
2315 * any flushes to be pipelined (for pageflips).
2318 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2320 struct intel_ring_buffer *pipelined)
2322 u32 old_read_domains, old_write_domain;
2325 if (pipelined != obj->ring) {
2326 ret = i915_gem_object_sync(obj, pipelined);
2331 /* The display engine is not coherent with the LLC cache on gen6. As
2332 * a result, we make sure that the pinning that is about to occur is
2333 * done with uncached PTEs. This is lowest common denominator for all chipsets.
2336 * However for gen6+, we could do better by using the GFDT bit instead
2337 * of uncaching, which would allow us to flush all the LLC-cached data
2338 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
2340 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
2344 /* As the user may map the buffer once pinned in the display plane
2345 * (e.g. libkms for the bootup splash), we have to ensure that we
2346 * always use map_and_fenceable for all scanout buffers.
2348 ret = i915_gem_object_pin(obj, alignment, true, false);
2352 i915_gem_object_flush_cpu_write_domain(obj);
2354 old_write_domain = obj->base.write_domain;
2355 old_read_domains = obj->base.read_domains;
2357 /* It should now be out of any other write domains, and we can update
2358 * the domain values for our changes.
2360 obj->base.write_domain = 0;
2361 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2367 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
2371 if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
2374 ret = i915_gem_object_wait_rendering(obj, false);
2378 /* Ensure that we invalidate the GPU's caches and TLBs. */
2379 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2384 * Moves a single object to the CPU read, and possibly write domain.
2386 * This function returns when the move is complete, including waiting on flushes to occur.
2390 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
2392 uint32_t old_write_domain, old_read_domains;
2395 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
2398 ret = i915_gem_object_wait_rendering(obj, !write);
2402 i915_gem_object_flush_gtt_write_domain(obj);
2404 old_write_domain = obj->base.write_domain;
2405 old_read_domains = obj->base.read_domains;
2407 /* Flush the CPU cache if it's still invalid. */
2408 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2409 i915_gem_clflush_object(obj);
2411 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2414 /* It should now be out of any other write domains, and we can update
2415 * the domain values for our changes.
2417 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2419 /* If we're writing through the CPU, then the GPU read domains will
2420 * need to be invalidated at next use.
2423 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2424 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2430 /* Throttle our rendering by waiting until the ring has completed our requests
2431 * emitted over 20 msec ago.
2433 * Note that if we were to use the current jiffies each time around the loop,
2434 * we wouldn't escape the function with any frames outstanding if the time to
2435 * render a frame was over 20ms.
2437 * This should get us reasonable parallelism between CPU and GPU but also
2438 * relatively low latency when blocking on a particular request to finish.
2441 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
2443 struct drm_i915_private *dev_priv = dev->dev_private;
2444 struct drm_i915_file_private *file_priv = file->driver_priv;
2445 unsigned long recent_enough = ticks - (20 * hz / 1000);
2446 struct drm_i915_gem_request *request;
2447 struct intel_ring_buffer *ring = NULL;
2451 if (atomic_read(&dev_priv->mm.wedged))
2454 spin_lock(&file_priv->mm.lock);
2455 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
2456 if (time_after_eq(request->emitted_jiffies, recent_enough))
2459 ring = request->ring;
2460 seqno = request->seqno;
2462 spin_unlock(&file_priv->mm.lock);
2467 ret = __wait_seqno(ring, seqno, true, NULL);
2470 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
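/*
 * Illustrative sketch (hypothetical, not built): the "recent_enough" cut-off
 * above is just "now minus 20 ms" expressed in scheduler ticks, assuming the
 * BSD ticks/hz globals.
 */
#if 0
static unsigned long
example_throttle_cutoff(void)
{
	/* hz is ticks per second, so 20 * hz / 1000 ticks span 20 ms. */
	return ticks - (20 * hz / 1000);
}
#endif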
2476 i915_gem_object_pin(struct drm_i915_gem_object *obj,
2478 bool map_and_fenceable,
2483 if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
2486 if (obj->gtt_space != NULL) {
2487 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
2488 (map_and_fenceable && !obj->map_and_fenceable)) {
2489 WARN(obj->pin_count,
2490 "bo is already pinned with incorrect alignment:"
2491 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
2492 " obj->map_and_fenceable=%d\n",
2493 obj->gtt_offset, alignment,
2494 map_and_fenceable,
2495 obj->map_and_fenceable);
2496 ret = i915_gem_object_unbind(obj);
2502 if (obj->gtt_space == NULL) {
2503 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2505 ret = i915_gem_object_bind_to_gtt(obj, alignment,
2511 if (!dev_priv->mm.aliasing_ppgtt)
2512 i915_gem_gtt_bind_object(obj, obj->cache_level);
2515 if (!obj->has_global_gtt_mapping && map_and_fenceable)
2516 i915_gem_gtt_bind_object(obj, obj->cache_level);
2519 obj->pin_mappable |= map_and_fenceable;
2525 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
2527 BUG_ON(obj->pin_count == 0);
2528 BUG_ON(obj->gtt_space == NULL);
2530 if (--obj->pin_count == 0)
2531 obj->pin_mappable = false;
2535 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2536 struct drm_file *file)
2538 struct drm_i915_gem_pin *args = data;
2539 struct drm_i915_gem_object *obj;
2542 ret = i915_mutex_lock_interruptible(dev);
2546 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2547 if (&obj->base == NULL) {
2552 if (obj->madv != I915_MADV_WILLNEED) {
2553 DRM_ERROR("Attempting to pin a purgeable buffer\n");
2558 if (obj->pin_filp != NULL && obj->pin_filp != file) {
2559 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
2565 if (obj->user_pin_count == 0) {
2566 ret = i915_gem_object_pin(obj, args->alignment, true, false);
2571 obj->user_pin_count++;
2572 obj->pin_filp = file;
2574 /* XXX - flush the CPU caches for pinned objects
2575 * as the X server doesn't manage domains yet
2577 i915_gem_object_flush_cpu_write_domain(obj);
2578 args->offset = obj->gtt_offset;
2580 drm_gem_object_unreference(&obj->base);
2587 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2588 struct drm_file *file)
2590 struct drm_i915_gem_pin *args = data;
2591 struct drm_i915_gem_object *obj;
2594 ret = i915_mutex_lock_interruptible(dev);
2598 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2599 if (&obj->base == NULL) {
2604 if (obj->pin_filp != file) {
2605 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
2610 obj->user_pin_count--;
2611 if (obj->user_pin_count == 0) {
2612 obj->pin_filp = NULL;
2613 i915_gem_object_unpin(obj);
2617 drm_gem_object_unreference(&obj->base);
2624 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2625 struct drm_file *file)
2627 struct drm_i915_gem_busy *args = data;
2628 struct drm_i915_gem_object *obj;
2631 ret = i915_mutex_lock_interruptible(dev);
2635 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
2636 if (&obj->base == NULL) {
2641 /* Count all active objects as busy, even if they are currently not used
2642 * by the gpu. Users of this interface expect objects to eventually
2643 * become non-busy without any further actions, therefore emit any
2644 * necessary flushes here.
2646 ret = i915_gem_object_flush_active(obj);
2648 args->busy = obj->active;
2650 args->busy |= intel_ring_flag(obj->ring) << 16;
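/*
 * Illustrative decode (hypothetical, caller-side view): bit 0 of args->busy
 * carries obj->active, and bits 16 and up carry intel_ring_flag(), i.e.
 * (1 << ring id) shifted by 16.
 */
#if 0
	busy_at_all = (args->busy & 1) != 0;
	busy_on_render_ring = (args->busy >> 16) & (1 << RCS);
#endif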
2653 drm_gem_object_unreference(&obj->base);
2660 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2661 struct drm_file *file_priv)
2663 return i915_gem_ring_throttle(dev, file_priv);
2667 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
2668 struct drm_file *file_priv)
2670 struct drm_i915_gem_madvise *args = data;
2671 struct drm_i915_gem_object *obj;
2674 switch (args->madv) {
2675 case I915_MADV_DONTNEED:
2676 case I915_MADV_WILLNEED:
2682 ret = i915_mutex_lock_interruptible(dev);
2686 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
2687 if (&obj->base == NULL) {
2692 if (obj->pin_count) {
2697 if (obj->madv != __I915_MADV_PURGED)
2698 obj->madv = args->madv;
2700 /* if the object is no longer attached, discard its backing storage */
2701 if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
2702 i915_gem_object_truncate(obj);
2704 args->retained = obj->madv != __I915_MADV_PURGED;
2707 drm_gem_object_unreference(&obj->base);
2713 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
2716 struct drm_i915_private *dev_priv;
2717 struct drm_i915_gem_object *obj;
2719 dev_priv = dev->dev_private;
2721 obj = kmalloc(sizeof(*obj), M_DRM, M_WAITOK | M_ZERO);
2723 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
2724 drm_free(obj, M_DRM);
2728 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2729 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2732 /* On some devices, we can have the GPU use the LLC (the CPU
2733 * cache) for about a 10% performance improvement
2734 * compared to uncached. Graphics requests other than
2735 * display scanout are coherent with the CPU in
2736 * accessing this cache. This means in this mode we
2737 * don't need to clflush on the CPU side, and on the
2738 * GPU side we only need to flush internal caches to
2739 * get data visible to the CPU.
2741 * However, we maintain the display planes as UC, and so
2742 * need to rebind when first used as such.
2744 obj->cache_level = I915_CACHE_LLC;
2746 obj->cache_level = I915_CACHE_NONE;
2747 obj->base.driver_private = NULL;
2748 obj->fence_reg = I915_FENCE_REG_NONE;
2749 INIT_LIST_HEAD(&obj->mm_list);
2750 INIT_LIST_HEAD(&obj->gtt_list);
2751 INIT_LIST_HEAD(&obj->ring_list);
2752 INIT_LIST_HEAD(&obj->exec_list);
2753 obj->madv = I915_MADV_WILLNEED;
2754 /* Avoid an unnecessary call to unbind on the first bind. */
2755 obj->map_and_fenceable = true;
2757 i915_gem_info_add_obj(dev_priv, size);
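/*
 * Illustrative sketch (hypothetical, not part of the driver): a minimal user
 * of the allocator above.  New objects start in the CPU domain and, per the
 * comment above, are LLC-cached on parts with a shared LLC and uncached
 * otherwise.
 */
#if 0
static struct drm_i915_gem_object *
example_alloc_one_page(struct drm_device *dev)
{
	return i915_gem_alloc_object(dev, PAGE_SIZE);
}
#endif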
2762 int i915_gem_init_object(struct drm_gem_object *obj)
2769 void i915_gem_free_object(struct drm_gem_object *gem_obj)
2771 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
2772 struct drm_device *dev = obj->base.dev;
2773 drm_i915_private_t *dev_priv = dev->dev_private;
2776 i915_gem_detach_phys_object(dev, obj);
2779 if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
2780 bool was_interruptible;
2782 was_interruptible = dev_priv->mm.interruptible;
2783 dev_priv->mm.interruptible = false;
2785 WARN_ON(i915_gem_object_unbind(obj));
2787 dev_priv->mm.interruptible = was_interruptible;
2790 drm_gem_free_mmap_offset(&obj->base);
2792 drm_gem_object_release(&obj->base);
2793 i915_gem_info_remove_obj(dev_priv, obj->base.size);
2795 drm_free(obj->bit_17, M_DRM);
2796 drm_free(obj, M_DRM);
2800 i915_gem_do_init(struct drm_device *dev, unsigned long start,
2801 unsigned long mappable_end, unsigned long end)
2803 drm_i915_private_t *dev_priv;
2804 unsigned long mappable;
2807 dev_priv = dev->dev_private;
2808 mappable = min(end, mappable_end) - start;
2810 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
2812 dev_priv->mm.gtt_start = start;
2813 dev_priv->mm.gtt_mappable_end = mappable_end;
2814 dev_priv->mm.gtt_end = end;
2815 dev_priv->mm.gtt_total = end - start;
2816 dev_priv->mm.mappable_gtt_total = mappable;
2818 /* Take over this portion of the GTT */
2819 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
2820 device_printf(dev->dev,
2821 "taking over the fictitious range 0x%lx-0x%lx\n",
2822 dev->agp->base + start, dev->agp->base + start + mappable);
2823 error = -vm_phys_fictitious_reg_range(dev->agp->base + start,
2824 dev->agp->base + start + mappable, VM_MEMATTR_WRITE_COMBINING);
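/*
 * Worked example (illustrative): with start = 0, mappable_end = 256 MB and
 * end = 512 MB, the code above sets gtt_total to 512 MB and
 * mappable_gtt_total to 256 MB, clears the PTEs for the whole 512 MB range,
 * and registers a fictitious range covering the 256 MB CPU-visible aperture
 * starting at dev->agp->base.
 */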
2829 i915_gem_idle(struct drm_device *dev)
2831 drm_i915_private_t *dev_priv = dev->dev_private;
2836 if (dev_priv->mm.suspended) {
2841 ret = i915_gpu_idle(dev);
2846 i915_gem_retire_requests(dev);
2848 /* Under UMS, be paranoid and evict. */
2849 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2850 i915_gem_evict_everything(dev);
2852 i915_gem_reset_fences(dev);
2854 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2855 * We need to replace this with a semaphore, or something.
2856 * And not confound mm.suspended!
2858 dev_priv->mm.suspended = 1;
2859 del_timer_sync(&dev_priv->hangcheck_timer);
2861 i915_kernel_lost_context(dev);
2862 i915_gem_cleanup_ringbuffer(dev);
2866 /* Cancel the retire work handler, which should be idle now. */
2867 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
2872 void i915_gem_l3_remap(struct drm_device *dev)
2874 drm_i915_private_t *dev_priv = dev->dev_private;
2878 if (!HAS_L3_GPU_CACHE(dev))
2881 if (!dev_priv->l3_parity.remap_info)
2884 misccpctl = I915_READ(GEN7_MISCCPCTL);
2885 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
2886 POSTING_READ(GEN7_MISCCPCTL);
2888 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
2889 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
2890 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
2891 DRM_DEBUG("0x%x was already programmed to %x\n",
2892 GEN7_L3LOG_BASE + i, remap);
2893 if (remap && !dev_priv->l3_parity.remap_info[i/4])
2894 DRM_DEBUG_DRIVER("Clearing remapped register\n");
2895 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
2898 /* Make sure all the writes land before disabling dop clock gating */
2899 POSTING_READ(GEN7_L3LOG_BASE);
2901 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
2904 void i915_gem_init_swizzling(struct drm_device *dev)
2906 drm_i915_private_t *dev_priv = dev->dev_private;
2908 if (INTEL_INFO(dev)->gen < 5 ||
2909 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
2912 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
2913 DISP_TILE_SURFACE_SWIZZLING);
2918 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
2920 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
2922 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
2926 intel_enable_blt(struct drm_device *dev)
2933 /* The blitter was dysfunctional on early prototypes */
2934 revision = pci_read_config(dev->dev, PCIR_REVID, 1);
2935 if (IS_GEN6(dev) && revision < 8) {
2936 DRM_INFO("BLT not supported on this pre-production hardware;"
2937 " graphics performance will be degraded.\n");
2945 i915_gem_init_hw(struct drm_device *dev)
2947 drm_i915_private_t *dev_priv = dev->dev_private;
2950 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
2951 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
2953 i915_gem_l3_remap(dev);
2955 i915_gem_init_swizzling(dev);
2957 ret = intel_init_render_ring_buffer(dev);
2962 ret = intel_init_bsd_ring_buffer(dev);
2964 goto cleanup_render_ring;
2967 if (intel_enable_blt(dev)) {
2968 ret = intel_init_blt_ring_buffer(dev);
2970 goto cleanup_bsd_ring;
2973 dev_priv->next_seqno = 1;
2976 * XXX: There was some w/a described somewhere suggesting loading
2977 * contexts before PPGTT.
2979 i915_gem_context_init(dev);
2980 i915_gem_init_ppgtt(dev);
2985 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
2986 cleanup_render_ring:
2987 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
2992 intel_enable_ppgtt(struct drm_device *dev)
2994 if (i915_enable_ppgtt >= 0)
2995 return i915_enable_ppgtt;
2997 /* Disable ppgtt on SNB if VT-d is on. */
2998 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_enabled)
3004 int i915_gem_init(struct drm_device *dev)
3006 struct drm_i915_private *dev_priv = dev->dev_private;
3007 unsigned long prealloc_size, gtt_size, mappable_size;
3010 prealloc_size = dev_priv->mm.gtt->stolen_size;
3011 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3012 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3014 /* Basic memrange allocator for stolen space */
3015 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
3018 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
3019 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3020 * aperture accordingly when using aliasing ppgtt. */
3021 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
3022 /* For paranoia keep the guard page in between. */
3023 gtt_size -= PAGE_SIZE;
3025 i915_gem_do_init(dev, 0, mappable_size, gtt_size);
3027 ret = i915_gem_init_aliasing_ppgtt(dev);
3033 /* Let GEM Manage all of the aperture.
3035 * However, leave one page at the end still bound to the scratch
3036 * page. There are a number of places where the hardware
3037 * apparently prefetches past the end of the object, and we've
3038 * seen multiple hangs with the GPU head pointer stuck in a
3039 * batchbuffer bound at the last page of the aperture. One page
3040 * should be enough to keep any prefetching inside of the aperture.
3043 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
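/*
 * Worked example (illustrative): without aliasing PPGTT the call above hands
 * GEM everything except the final 4 KB page, so a 512 MB GTT yields a
 * managed range of 0 .. 512 MB - 4 KB, with the last page left bound to the
 * scratch page as the guard described above.
 */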
3046 ret = i915_gem_init_hw(dev);
3049 i915_gem_cleanup_aliasing_ppgtt(dev);
3054 /* Try to set up FBC with a reasonable compressed buffer size */
3055 if (I915_HAS_FBC(dev) && i915_powersave) {
3058 /* Leave 1M for line length buffer & misc. */
3060 /* Try to get a 32M buffer... */
3061 if (prealloc_size > (36*1024*1024))
3062 cfb_size = 32*1024*1024;
3063 else /* fall back to 7/8 of the stolen space */
3064 cfb_size = prealloc_size * 7 / 8;
3065 i915_setup_compression(dev, cfb_size);
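/*
 * Worked example (illustrative): with 64 MB of stolen memory the test above
 * selects the fixed 32 MB compressed buffer; with only 16 MB of stolen
 * memory it falls back to 7/8 of it, i.e. 14 MB.
 */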
3069 /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
3070 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3071 dev_priv->dri1.allow_batchbuffer = 1;
3076 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3078 drm_i915_private_t *dev_priv = dev->dev_private;
3079 struct intel_ring_buffer *ring;
3082 for_each_ring(ring, dev_priv, i)
3083 intel_cleanup_ring_buffer(ring);
3087 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3088 struct drm_file *file_priv)
3090 drm_i915_private_t *dev_priv = dev->dev_private;
3093 if (drm_core_check_feature(dev, DRIVER_MODESET))
3096 if (atomic_read(&dev_priv->mm.wedged)) {
3097 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3098 atomic_set(&dev_priv->mm.wedged, 0);
3102 dev_priv->mm.suspended = 0;
3104 ret = i915_gem_init_hw(dev);
3110 KASSERT(list_empty(&dev_priv->mm.active_list), ("active list"));
3113 ret = drm_irq_install(dev);
3115 goto cleanup_ringbuffer;
3121 i915_gem_cleanup_ringbuffer(dev);
3122 dev_priv->mm.suspended = 1;
3129 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3130 struct drm_file *file_priv)
3132 if (drm_core_check_feature(dev, DRIVER_MODESET))
3135 drm_irq_uninstall(dev);
3136 return i915_gem_idle(dev);
3140 i915_gem_lastclose(struct drm_device *dev)
3144 if (drm_core_check_feature(dev, DRIVER_MODESET))
3147 ret = i915_gem_idle(dev);
3149 DRM_ERROR("failed to idle hardware: %d\n", ret);
3153 init_ring_lists(struct intel_ring_buffer *ring)
3155 INIT_LIST_HEAD(&ring->active_list);
3156 INIT_LIST_HEAD(&ring->request_list);
3160 i915_gem_load(struct drm_device *dev)
3163 drm_i915_private_t *dev_priv = dev->dev_private;
3165 INIT_LIST_HEAD(&dev_priv->mm.active_list);
3166 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3167 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3168 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3169 for (i = 0; i < I915_NUM_RINGS; i++)
3170 init_ring_lists(&dev_priv->ring[i]);
3171 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3172 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3173 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3174 i915_gem_retire_work_handler);
3175 init_completion(&dev_priv->error_completion);
3177 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3179 I915_WRITE(MI_ARB_STATE,
3180 _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
3183 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3185 /* Old X drivers will take 0-2 for front, back, depth buffers */
3186 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3187 dev_priv->fence_reg_start = 3;
3189 if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3190 dev_priv->num_fence_regs = 16;
3192 dev_priv->num_fence_regs = 8;
3194 /* Initialize fence registers to zero */
3195 i915_gem_reset_fences(dev);
3197 i915_gem_detect_bit_6_swizzle(dev);
3198 init_waitqueue_head(&dev_priv->pending_flip_queue);
3200 dev_priv->mm.interruptible = true;
3203 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3204 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3205 register_shrinker(&dev_priv->mm.inactive_shrinker);
3207 dev_priv->mm.i915_lowmem = EVENTHANDLER_REGISTER(vm_lowmem,
3208 i915_gem_lowmem, dev, EVENTHANDLER_PRI_ANY);
3213 * Create a physically contiguous memory object for this object
3214 * e.g. for cursor + overlay regs
3216 static int i915_gem_init_phys_object(struct drm_device *dev,
3217 int id, int size, int align)
3219 drm_i915_private_t *dev_priv = dev->dev_private;
3220 struct drm_i915_gem_phys_object *phys_obj;
3223 if (dev_priv->mm.phys_objs[id - 1] || !size)
3226 phys_obj = kmalloc(sizeof(struct drm_i915_gem_phys_object), M_DRM,
3233 phys_obj->handle = drm_pci_alloc(dev, size, align, ~0);
3234 if (!phys_obj->handle) {
3238 pmap_change_attr((vm_offset_t)phys_obj->handle->vaddr,
3239 size / PAGE_SIZE, PAT_WRITE_COMBINING);
3241 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3246 drm_free(phys_obj, M_DRM);
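/*
 * Illustrative sketch (hypothetical, not part of the driver): backing a
 * small object such as a cursor with one of the contiguous physical objects
 * initialized above.  The slot and alignment chosen here are examples only.
 */
#if 0
static int
example_attach_cursor(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
	return i915_gem_attach_phys_object(dev, obj, I915_GEM_PHYS_CURSOR_0,
	    PAGE_SIZE);
}
#endif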
3250 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
3252 drm_i915_private_t *dev_priv = dev->dev_private;
3253 struct drm_i915_gem_phys_object *phys_obj;
3255 if (!dev_priv->mm.phys_objs[id - 1])
3258 phys_obj = dev_priv->mm.phys_objs[id - 1];
3259 if (phys_obj->cur_obj) {
3260 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3263 drm_pci_free(dev, phys_obj->handle);
3264 drm_free(phys_obj, M_DRM);
3265 dev_priv->mm.phys_objs[id - 1] = NULL;
3268 void i915_gem_free_all_phys_object(struct drm_device *dev)
3272 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3273 i915_gem_free_phys_object(dev, i);
3276 void i915_gem_detach_phys_object(struct drm_device *dev,
3277 struct drm_i915_gem_object *obj)
3286 vaddr = obj->phys_obj->handle->vaddr;
3288 page_count = obj->base.size / PAGE_SIZE;
3289 VM_OBJECT_LOCK(obj->base.vm_obj);
3290 for (i = 0; i < page_count; i++) {
3291 m = i915_gem_wire_page(obj->base.vm_obj, i);
3295 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3296 sf = sf_buf_alloc(m);
3298 dst = (char *)sf_buf_kva(sf);
3299 memcpy(dst, vaddr + IDX_TO_OFF(i), PAGE_SIZE);
3302 drm_clflush_pages(&m, 1);
3304 VM_OBJECT_LOCK(obj->base.vm_obj);
3305 vm_page_reference(m);
3307 vm_page_busy_wait(m, FALSE, "i915gem");
3308 vm_page_unwire(m, 0);
3311 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3312 intel_gtt_chipset_flush();
3314 obj->phys_obj->cur_obj = NULL;
3315 obj->phys_obj = NULL;
3319 i915_gem_attach_phys_object(struct drm_device *dev,
3320 struct drm_i915_gem_object *obj,
3324 drm_i915_private_t *dev_priv = dev->dev_private;
3328 int i, page_count, ret;
3330 if (id > I915_MAX_PHYS_OBJECT)
3333 if (obj->phys_obj) {
3334 if (obj->phys_obj->id == id)
3336 i915_gem_detach_phys_object(dev, obj);
3339 /* create a new object */
3340 if (!dev_priv->mm.phys_objs[id - 1]) {
3341 ret = i915_gem_init_phys_object(dev, id,
3342 obj->base.size, align);
3344 DRM_ERROR("failed to init phys object %d size: %zu\n",
3345 id, obj->base.size);
3350 /* bind to the object */
3351 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
3352 obj->phys_obj->cur_obj = obj;
3354 page_count = obj->base.size / PAGE_SIZE;
3356 VM_OBJECT_LOCK(obj->base.vm_obj);
3358 for (i = 0; i < page_count; i++) {
3359 m = i915_gem_wire_page(obj->base.vm_obj, i);
3364 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3365 sf = sf_buf_alloc(m);
3366 src = (char *)sf_buf_kva(sf);
3367 dst = (char *)obj->phys_obj->handle->vaddr + IDX_TO_OFF(i);
3368 memcpy(dst, src, PAGE_SIZE);
3371 VM_OBJECT_LOCK(obj->base.vm_obj);
3373 vm_page_reference(m);
3374 vm_page_busy_wait(m, FALSE, "i915gem");
3375 vm_page_unwire(m, 0);
3378 VM_OBJECT_UNLOCK(obj->base.vm_obj);
3384 i915_gem_phys_pwrite(struct drm_device *dev,
3385 struct drm_i915_gem_object *obj,
3389 struct drm_file *file_priv)
3391 char *user_data, *vaddr;
3394 vaddr = (char *)obj->phys_obj->handle->vaddr + offset;
3395 user_data = (char *)(uintptr_t)data_ptr;
3397 if (copyin_nofault(user_data, vaddr, size) != 0) {
3398 /* The physical object once assigned is fixed for the lifetime
3399 * of the obj, so we can safely drop the lock and continue to access vaddr.
3403 ret = -copyin(user_data, vaddr, size);
3409 intel_gtt_chipset_flush();
3413 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
3415 struct drm_i915_file_private *file_priv = file->driver_priv;
3417 /* Clean up our request list when the client is going away, so that
3418 * later retire_requests won't dereference our soon-to-be-gone file_priv.
3421 spin_lock(&file_priv->mm.lock);
3422 while (!list_empty(&file_priv->mm.request_list)) {
3423 struct drm_i915_gem_request *request;
3425 request = list_first_entry(&file_priv->mm.request_list,
3426 struct drm_i915_gem_request,
3428 list_del(&request->client_list);
3429 request->file_priv = NULL;
3431 spin_unlock(&file_priv->mm.lock);
3435 i915_gem_swap_io(struct drm_device *dev, struct drm_i915_gem_object *obj,
3436 uint64_t data_ptr, uint64_t size, uint64_t offset, enum uio_rw rw,
3437 struct drm_file *file)
3444 int cnt, do_bit17_swizzling, length, obj_po, ret, swizzled_po;
3446 if (obj->gtt_offset != 0 && rw == UIO_READ)
3447 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
3449 do_bit17_swizzling = 0;
3452 vm_obj = obj->base.vm_obj;
3455 VM_OBJECT_LOCK(vm_obj);
3456 vm_object_pip_add(vm_obj, 1);
3458 obj_pi = OFF_TO_IDX(offset);
3459 obj_po = offset & PAGE_MASK;
3461 m = i915_gem_wire_page(vm_obj, obj_pi);
3462 VM_OBJECT_UNLOCK(vm_obj);
3464 sf = sf_buf_alloc(m);
3465 mkva = sf_buf_kva(sf);
3466 length = min(size, PAGE_SIZE - obj_po);
3467 while (length > 0) {
3468 if (do_bit17_swizzling &&
3469 (VM_PAGE_TO_PHYS(m) & (1 << 17)) != 0) {
3470 cnt = roundup2(obj_po + 1, 64);
3471 cnt = min(cnt - obj_po, length);
3472 swizzled_po = obj_po ^ 64;
3475 swizzled_po = obj_po;
3478 ret = -copyout_nofault(
3479 (char *)mkva + swizzled_po,
3480 (void *)(uintptr_t)data_ptr, cnt);
3482 ret = -copyin_nofault(
3483 (void *)(uintptr_t)data_ptr,
3484 (char *)mkva + swizzled_po, cnt);
3494 VM_OBJECT_LOCK(vm_obj);
3495 if (rw == UIO_WRITE)
3497 vm_page_reference(m);
3498 vm_page_busy_wait(m, FALSE, "i915gem");
3499 vm_page_unwire(m, 1);
3505 vm_object_pip_wakeup(vm_obj);
3506 VM_OBJECT_UNLOCK(vm_obj);
3512 i915_gem_gtt_write(struct drm_device *dev, struct drm_i915_gem_object *obj,
3513 uint64_t data_ptr, uint64_t size, uint64_t offset, struct drm_file *file)
3519 * Pass the unaligned physical address and size to pmap_mapdev_attr()
3520 * so it can properly calculate whether an extra page needs to be
3521 * mapped or not to cover the requested range. The function will
3522 * add the page offset into the returned mkva for us.
3524 mkva = (vm_offset_t)pmap_mapdev_attr(dev->agp->base + obj->gtt_offset +
3525 offset, size, PAT_WRITE_COMBINING);
3526 ret = -copyin_nofault((void *)(uintptr_t)data_ptr, (char *)mkva, size);
3527 pmap_unmapdev(mkva, size);
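/*
 * Worked example (illustrative): because the physical address handed to
 * pmap_mapdev_attr() above is not page aligned, an 8-byte write at GTT
 * offset 0xffc maps two pages, and the returned mkva already includes the
 * 0xffc page offset, so the copy can start at mkva directly.
 */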
3532 i915_gem_obj_io(struct drm_device *dev, uint32_t handle, uint64_t data_ptr,
3533 uint64_t size, uint64_t offset, enum uio_rw rw, struct drm_file *file)
3535 struct drm_i915_gem_object *obj;
3537 vm_offset_t start, end;
3542 start = trunc_page(data_ptr);
3543 end = round_page(data_ptr + size);
3544 npages = howmany(end - start, PAGE_SIZE);
3545 ma = kmalloc(npages * sizeof(vm_page_t), M_DRM, M_WAITOK |
3547 npages = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
3548 (vm_offset_t)data_ptr, size,
3549 (rw == UIO_READ ? VM_PROT_WRITE : 0 ) | VM_PROT_READ, ma, npages);
3555 ret = i915_mutex_lock_interruptible(dev);
3559 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
3560 if (&obj->base == NULL) {
3564 if (offset > obj->base.size || size > obj->base.size - offset) {
3569 if (rw == UIO_READ) {
3570 ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset,
3573 if (obj->phys_obj) {
3574 ret = i915_gem_phys_pwrite(dev, obj, data_ptr, offset,
3576 } else if (obj->gtt_space &&
3577 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
3578 ret = i915_gem_object_pin(obj, 0, true, false);
3581 ret = i915_gem_object_set_to_gtt_domain(obj, true);
3584 ret = i915_gem_object_put_fence(obj);
3587 ret = i915_gem_gtt_write(dev, obj, data_ptr, size,
3590 i915_gem_object_unpin(obj);
3592 ret = i915_gem_object_set_to_cpu_domain(obj, true);
3595 ret = i915_gem_swap_io(dev, obj, data_ptr, size, offset,
3600 drm_gem_object_unreference(&obj->base);
3604 vm_page_unhold_pages(ma, npages);
3606 drm_free(ma, M_DRM);
3611 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
3612 vm_ooffset_t foff, struct ucred *cred, u_short *color)
3615 *color = 0; /* XXXKIB */
3622 i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
3625 struct drm_gem_object *gem_obj;
3626 struct drm_i915_gem_object *obj;
3627 struct drm_device *dev;
3628 drm_i915_private_t *dev_priv;
3633 gem_obj = vm_obj->handle;
3634 obj = to_intel_bo(gem_obj);
3635 dev = obj->base.dev;
3636 dev_priv = dev->dev_private;
3638 write = (prot & VM_PROT_WRITE) != 0;
3642 vm_object_pip_add(vm_obj, 1);
3645 * Remove the placeholder page inserted by vm_fault() from the
3646 * object before dropping the object lock. If
3647 * i915_gem_release_mmap() is active in parallel on this gem
3648 * object, then it owns the drm device sx and might find the
3649 * placeholder already. Then, since the page is busy,
3650 * i915_gem_release_mmap() sleeps waiting for the busy state
3651 * of the page cleared. We will not be able to acquire the drm
3652 * device lock until i915_gem_release_mmap() is able to make progress.
3655 if (*mres != NULL) {
3657 vm_page_remove(oldm);
3662 VM_OBJECT_UNLOCK(vm_obj);
3668 ret = i915_mutex_lock_interruptible(dev);
3677 * Since the object lock was dropped, another thread might have
3678 * faulted on the same GTT address and instantiated the
3679 * mapping for the page. Recheck.
3681 VM_OBJECT_LOCK(vm_obj);
3682 m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
3684 if ((m->flags & PG_BUSY) != 0) {
3687 vm_page_sleep(m, "915pee");
3693 VM_OBJECT_UNLOCK(vm_obj);
3695 /* Now bind it into the GTT if needed */
3696 if (!obj->map_and_fenceable) {
3697 ret = i915_gem_object_unbind(obj);
3703 if (!obj->gtt_space) {
3704 ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
3710 ret = i915_gem_object_set_to_gtt_domain(obj, write);
3717 if (obj->tiling_mode == I915_TILING_NONE)
3718 ret = i915_gem_object_put_fence(obj);
3720 ret = i915_gem_object_get_fence(obj);
3726 if (i915_gem_object_is_inactive(obj))
3727 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3729 obj->fault_mappable = true;
3730 VM_OBJECT_LOCK(vm_obj);
3731 m = vm_phys_fictitious_to_vm_page(dev->agp->base + obj->gtt_offset +
3738 KASSERT((m->flags & PG_FICTITIOUS) != 0,
3739 ("not fictitious %p", m));
3740 KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));
3742 if ((m->flags & PG_BUSY) != 0) {
3745 vm_page_sleep(m, "915pbs");
3749 m->valid = VM_PAGE_BITS_ALL;
3750 vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
3753 vm_page_busy_try(m, false);
3759 vm_object_pip_wakeup(vm_obj);
3760 return (VM_PAGER_OK);
3765 KASSERT(ret != 0, ("i915_gem_pager_fault: wrong return"));
3766 if (ret == -EAGAIN || ret == -EIO || ret == -EINTR) {
3767 goto unlocked_vmobj;
3769 VM_OBJECT_LOCK(vm_obj);
3770 vm_object_pip_wakeup(vm_obj);
3771 return (VM_PAGER_ERROR);
3775 i915_gem_pager_dtor(void *handle)
3777 struct drm_gem_object *obj;
3778 struct drm_device *dev;
3784 drm_gem_free_mmap_offset(obj);
3785 i915_gem_release_mmap(to_intel_bo(obj));
3786 drm_gem_object_unreference(obj);
3790 struct cdev_pager_ops i915_gem_pager_ops = {
3791 .cdev_pg_fault = i915_gem_pager_fault,
3792 .cdev_pg_ctor = i915_gem_pager_ctor,
3793 .cdev_pg_dtor = i915_gem_pager_dtor
3796 #define GEM_PARANOID_CHECK_GTT 0
3797 #if GEM_PARANOID_CHECK_GTT
3799 i915_gem_assert_pages_not_mapped(struct drm_device *dev, vm_page_t *ma,
3802 struct drm_i915_private *dev_priv;
3804 unsigned long start, end;
3808 dev_priv = dev->dev_private;
3809 start = OFF_TO_IDX(dev_priv->mm.gtt_start);
3810 end = OFF_TO_IDX(dev_priv->mm.gtt_end);
3811 for (i = start; i < end; i++) {
3812 pa = intel_gtt_read_pte_paddr(i);
3813 for (j = 0; j < page_count; j++) {
3814 if (pa == VM_PAGE_TO_PHYS(ma[j])) {
3815 panic("Page %p in GTT pte index %d pte %x",
3816 ma[j], i, intel_gtt_read_pte(i));
3823 #define VM_OBJECT_LOCK_ASSERT_OWNED(object)
3826 i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
3831 VM_OBJECT_LOCK_ASSERT_OWNED(object);
3832 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
3833 if (m->valid != VM_PAGE_BITS_ALL) {
3834 if (vm_pager_has_page(object, pindex)) {
3835 rv = vm_pager_get_page(object, &m, 1);
3836 m = vm_page_lookup(object, pindex);
3839 if (rv != VM_PAGER_OK) {
3844 pmap_zero_page(VM_PAGE_TO_PHYS(m));
3845 m->valid = VM_PAGE_BITS_ALL;
3855 i915_gpu_is_active(struct drm_device *dev)
3857 drm_i915_private_t *dev_priv = dev->dev_private;
3859 return !list_empty(&dev_priv->mm.active_list);
3863 i915_gem_lowmem(void *arg)
3865 struct drm_device *dev;
3866 struct drm_i915_private *dev_priv;
3867 struct drm_i915_gem_object *obj, *next;
3868 int cnt, cnt_fail, cnt_total;
3871 dev_priv = dev->dev_private;
3873 if (lockmgr(&dev->dev_struct_lock, LK_EXCLUSIVE|LK_NOWAIT))
3877 /* first scan for clean buffers */
3878 i915_gem_retire_requests(dev);
3880 cnt_total = cnt_fail = cnt = 0;
3882 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
3884 if (i915_gem_object_is_purgeable(obj)) {
3885 if (i915_gem_object_unbind(obj) != 0)
3891 /* second pass, evict/count anything still on the inactive list */
3892 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list,
3894 if (i915_gem_object_unbind(obj) == 0)
3900 if (cnt_fail > cnt_total / 100 && i915_gpu_is_active(dev)) {
3902 * We are desperate for pages, so as a last resort, wait
3903 * for the GPU to finish and discard whatever we can.
3904 * This dramatically reduces the number of OOM-killer
3905 * events whilst running the GPU aggressively.
3907 if (i915_gpu_idle(dev) == 0)
3914 i915_gem_unload(struct drm_device *dev)
3916 struct drm_i915_private *dev_priv;
3918 dev_priv = dev->dev_private;
3919 EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);