/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
#define __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */

#define BATCH_OFFSET_BIAS (256*1024)
struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};
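/*
 * eb_vmas is used in one of two modes: with I915_EXEC_HANDLE_LUT the exec
 * entries are referenced by their index, eb->and is negative
 * (-args->buffer_count) and lut[] is a direct index->vma array; otherwise
 * eb->and is a power-of-two-minus-one mask and buckets[] is a small hash
 * table keyed by handle & eb->and (see eb_get_vma()).
 */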
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, M_DRM, M_NOWAIT);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);	/* allocation flag as in upstream Linux */
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}
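/*
 * Hash-table sizing sketch, assuming PAGE_SIZE == 4096 and an 8-byte
 * struct hlist_head: count starts at 4096/8/2 = 256 buckets and is halved
 * while it exceeds twice the buffer count, so e.g. a 20-buffer execbuf gets
 * 32 buckets and eb->and == 31.
 */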
static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
95 eb_lookup_vmas(struct eb_vmas *eb,
96 struct drm_i915_gem_exec_object2 *exec,
97 const struct drm_i915_gem_execbuffer2 *args,
98 struct i915_address_space *vm,
99 struct drm_file *file)
101 struct drm_i915_gem_object *obj;
102 struct list_head objects;
105 INIT_LIST_HEAD(&objects);
106 lockmgr(&file->table_lock, LK_EXCLUSIVE);
107 /* Grab a reference to the object and release the lock so we can lookup
108 * or create the VMA without using GFP_ATOMIC */
109 for (i = 0; i < args->buffer_count; i++) {
110 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
112 lockmgr(&file->table_lock, LK_RELEASE);
113 DRM_DEBUG("Invalid object handle %d at index %d\n",
119 if (!list_empty(&obj->obj_exec_link)) {
120 lockmgr(&file->table_lock, LK_RELEASE);
121 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
122 obj, exec[i].handle, i);
127 i915_gem_object_get(obj);
128 list_add_tail(&obj->obj_exec_link, &objects);
130 lockmgr(&file->table_lock, LK_RELEASE);
133 while (!list_empty(&objects)) {
134 struct i915_vma *vma;
136 obj = list_first_entry(&objects,
137 struct drm_i915_gem_object,
141 * NOTE: We can leak any vmas created here when something fails
142 * later on. But that's no issue since vma_unbind can deal with
143 * vmas which are not actually bound. And since only
144 * lookup_or_create exists as an interface to get at the vma
145 * from the (obj, vm) we don't run the risk of creating
146 * duplicated vmas for the same vm.
148 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
150 DRM_DEBUG("Failed to lookup VMA\n");
155 /* Transfer ownership from the objects list to the vmas list. */
156 list_add_tail(&vma->exec_list, &eb->vmas);
157 list_del_init(&obj->obj_exec_link);
159 vma->exec_entry = &exec[i];
163 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
164 vma->exec_handle = handle;
165 hlist_add_head(&vma->exec_node,
166 &eb->buckets[handle & eb->and]);
175 while (!list_empty(&objects)) {
176 obj = list_first_entry(&objects,
177 struct drm_i915_gem_object,
179 list_del_init(&obj->obj_exec_link);
180 i915_gem_object_put(obj);
 * Objects already transferred to the vmas list will be unreferenced by
190 static inline struct i915_vma *
191 eb_get_batch_vma(struct eb_vmas *eb)
193 /* The batch is always the LAST item in the VMA list */
194 struct i915_vma *vma = list_last_entry(&eb->vmas, typeof(*vma), exec_list);
199 static struct drm_i915_gem_object *
200 eb_get_batch(struct eb_vmas *eb)
202 struct i915_vma *vma = eb_get_batch_vma(eb);
205 * SNA is doing fancy tricks with compressing batch buffers, which leads
206 * to negative relocation deltas. Usually that works out ok since the
207 * relocate address is still positive, except when the batch is placed
208 * very low in the GTT. Ensure this doesn't happen.
210 * Note that actual hangs have only been observed on gen7, but for
211 * paranoia do it everywhere.
213 if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
214 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}
239 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
241 struct drm_i915_gem_exec_object2 *entry;
242 struct drm_i915_gem_object *obj = vma->obj;
244 if (!drm_mm_node_allocated(&vma->node))
247 entry = vma->exec_entry;
249 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
250 i915_gem_object_unpin_fence(obj);
252 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
255 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		i915_gem_object_put(vma->obj);
	}
	kfree(eb);
}
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}
/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}
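/*
 * Example: 0x0000800000000000 (bit 47 set) becomes 0xffff800000000000 in
 * canonical form, since bits 63:48 must mirror bit 47; offsets below
 * 1ULL << 47 are unchanged.  gen8_noncanonical_addr() masks back down to the
 * low 48 bits before the address is handed to drm_mm, which uses the flat view.
 */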
static inline uint64_t
relocation_target(struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}
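/*
 * The (int) cast makes reloc->delta behave as a signed 32-bit offset, so
 * userspace may legitimately hand in negative deltas (see the SNA note in
 * eb_get_batch()); the sum is then canonicalised for gen8+.
 */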
306 relocate_entry_cpu(struct drm_i915_gem_object *obj,
307 struct drm_i915_gem_relocation_entry *reloc,
308 uint64_t target_offset)
310 struct drm_device *dev = obj->base.dev;
311 uint32_t page_offset = offset_in_page(reloc->offset);
312 uint64_t delta = relocation_target(reloc, target_offset);
316 ret = i915_gem_object_set_to_cpu_domain(obj, true);
320 vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
321 reloc->offset >> PAGE_SHIFT));
322 *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
324 if (INTEL_INFO(dev)->gen >= 8) {
325 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
327 if (page_offset == 0) {
328 kunmap_atomic(vaddr);
329 vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
330 (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
333 *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
336 kunmap_atomic(vaddr);
342 relocate_entry_gtt(struct drm_i915_gem_object *obj,
343 struct drm_i915_gem_relocation_entry *reloc,
344 uint64_t target_offset)
346 struct drm_device *dev = obj->base.dev;
347 struct drm_i915_private *dev_priv = to_i915(dev);
348 struct i915_ggtt *ggtt = &dev_priv->ggtt;
349 uint64_t delta = relocation_target(reloc, target_offset);
351 void __iomem *reloc_page;
354 ret = i915_gem_object_set_to_gtt_domain(obj, true);
358 ret = i915_gem_object_put_fence(obj);
362 /* Map the page containing the relocation we're going to perform. */
363 offset = i915_gem_obj_ggtt_offset(obj);
364 offset += reloc->offset;
365 reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
366 offset & LINUX_PAGE_MASK);
367 iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
369 if (INTEL_INFO(dev)->gen >= 8) {
370 offset += sizeof(uint32_t);
372 if (offset_in_page(offset) == 0) {
373 io_mapping_unmap_atomic(reloc_page);
375 io_mapping_map_atomic_wc(ggtt->mappable,
379 iowrite32(upper_32_bits(delta),
380 reloc_page + offset_in_page(offset));
383 io_mapping_unmap_atomic(reloc_page);
static void
clflush_write32(void *addr, uint32_t value)
{
	/* This is not a fast path, so KISS. */
	drm_clflush_virt_range(addr, sizeof(uint32_t));
	*(uint32_t *)addr = value;
	drm_clflush_virt_range(addr, sizeof(uint32_t));
}
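/*
 * Flushing the cacheline both before and after the store is deliberate
 * overkill ("KISS"): it guarantees the dword written by the CPU reaches
 * memory on this non-coherent path, at the cost of an extra clflush that a
 * faster path would avoid.
 */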
398 relocate_entry_clflush(struct drm_i915_gem_object *obj,
399 struct drm_i915_gem_relocation_entry *reloc,
400 uint64_t target_offset)
402 struct drm_device *dev = obj->base.dev;
403 uint32_t page_offset = offset_in_page(reloc->offset);
404 uint64_t delta = relocation_target(reloc, target_offset);
408 ret = i915_gem_object_set_to_gtt_domain(obj, true);
412 vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
413 reloc->offset >> PAGE_SHIFT));
414 clflush_write32(vaddr + page_offset, lower_32_bits(delta));
416 if (INTEL_INFO(dev)->gen >= 8) {
417 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
419 if (page_offset == 0) {
420 kunmap_atomic(vaddr);
421 vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj,
422 (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
425 clflush_write32(vaddr + page_offset, upper_32_bits(delta));
428 kunmap_atomic(vaddr);
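/*
 * Three relocation write-back strategies exist, selected in
 * i915_gem_execbuffer_relocate_entry() below: a cached CPU write via kmap
 * when the object is coherent (use_cpu_reloc()), a write-combined write
 * through the mappable GTT aperture, and the clflush variant above as the
 * fallback when neither applies but CLFLUSH is available.
 */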
434 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
436 struct drm_i915_gem_relocation_entry *reloc)
438 struct drm_device *dev = obj->base.dev;
439 struct drm_gem_object *target_obj;
440 struct drm_i915_gem_object *target_i915_obj;
441 struct i915_vma *target_vma;
442 uint64_t target_offset;
	/* we already hold a reference to all valid objects */
446 target_vma = eb_get_vma(eb, reloc->target_handle);
447 if (unlikely(target_vma == NULL))
449 target_i915_obj = target_vma->obj;
450 target_obj = &target_vma->obj->base;
452 target_offset = gen8_canonical_addr(target_vma->node.start);
454 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
455 * pipe_control writes because the gpu doesn't properly redirect them
456 * through the ppgtt for non_secure batchbuffers. */
457 if (unlikely(IS_GEN6(dev) &&
458 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
459 ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
461 if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
465 /* Validate that the target is in a valid r/w GPU domain */
466 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
467 DRM_DEBUG("reloc with multiple write domains: "
468 "obj %p target %d offset %d "
469 "read %08x write %08x",
470 obj, reloc->target_handle,
473 reloc->write_domain);
476 if (unlikely((reloc->write_domain | reloc->read_domains)
477 & ~I915_GEM_GPU_DOMAINS)) {
478 DRM_DEBUG("reloc with read/write non-GPU domains: "
479 "obj %p target %d offset %d "
480 "read %08x write %08x",
481 obj, reloc->target_handle,
484 reloc->write_domain);
488 target_obj->pending_read_domains |= reloc->read_domains;
489 target_obj->pending_write_domain |= reloc->write_domain;
491 /* If the relocation already has the right value in it, no
492 * more work needs to be done.
494 if (target_offset == reloc->presumed_offset)
497 /* Check that the relocation address is valid... */
498 if (unlikely(reloc->offset >
499 obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
500 DRM_DEBUG("Relocation beyond object bounds: "
501 "obj %p target %d offset %d size %d.\n",
502 obj, reloc->target_handle,
504 (int) obj->base.size);
507 if (unlikely(reloc->offset & 3)) {
508 DRM_DEBUG("Relocation not 4-byte aligned: "
509 "obj %p target %d offset %d.\n",
510 obj, reloc->target_handle,
511 (int) reloc->offset);
515 /* We can't wait for rendering with pagefaults disabled */
516 if (obj->active && (curthread->td_flags & TDF_NOFAULT))
519 if (use_cpu_reloc(obj))
520 ret = relocate_entry_cpu(obj, reloc, target_offset);
521 else if (obj->map_and_fenceable)
522 ret = relocate_entry_gtt(obj, reloc, target_offset);
523 else if (static_cpu_has(X86_FEATURE_CLFLUSH))
524 ret = relocate_entry_clflush(obj, reloc, target_offset);
526 WARN_ONCE(1, "Impossible case in relocation handling\n");
533 /* and update the user's relocation entry */
534 reloc->presumed_offset = target_offset;
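	/*
	 * Writing the new presumed_offset back (and copying it out to
	 * userspace in the caller) lets a well-behaved client skip this work
	 * on the next execbuf: if nothing has moved, the early-out above on
	 * target_offset == reloc->presumed_offset fires instead.
	 */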
540 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
543 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
544 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
545 struct drm_i915_gem_relocation_entry __user *user_relocs;
546 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
549 user_relocs = u64_to_user_ptr(entry->relocs_ptr);
551 remain = entry->relocation_count;
553 struct drm_i915_gem_relocation_entry *r = stack_reloc;
555 if (count > ARRAY_SIZE(stack_reloc))
556 count = ARRAY_SIZE(stack_reloc);
559 if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
563 u64 offset = r->presumed_offset;
565 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
569 if (r->presumed_offset != offset &&
570 __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
584 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
586 struct drm_i915_gem_relocation_entry *relocs)
588 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
591 for (i = 0; i < entry->relocation_count; i++) {
592 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
601 i915_gem_execbuffer_relocate(struct eb_vmas *eb)
603 struct i915_vma *vma;
606 /* This is the fast path and we cannot handle a pagefault whilst
607 * holding the struct mutex lest the user pass in the relocations
 * contained within a mmapped bo. In such a case the page
609 * fault handler would call i915_gem_fault() and we would try to
610 * acquire the struct mutex again. Obviously this is bad and so
611 * lockdep complains vehemently.
614 list_for_each_entry(vma, &eb->vmas, exec_list) {
615 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}
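/*
 * True when the mappable requirement comes only from relocation handling
 * (__EXEC_OBJECT_NEEDS_MAP) and not from a fence request; such a vma may be
 * retried without PIN_MAPPABLE if the mappable aperture is exhausted, see
 * i915_gem_execbuffer_reserve_vma().
 */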
631 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
632 struct intel_engine_cs *engine,
635 struct drm_i915_gem_object *obj = vma->obj;
636 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
641 if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
644 if (!drm_mm_node_allocated(&vma->node)) {
645 /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
646 * limit address to the first 4GBs for unflagged objects.
648 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
649 flags |= PIN_ZONE_4G;
650 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
651 flags |= PIN_GLOBAL | PIN_MAPPABLE;
652 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
653 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
654 if (entry->flags & EXEC_OBJECT_PINNED)
655 flags |= entry->offset | PIN_OFFSET_FIXED;
656 if ((flags & PIN_MAPPABLE) == 0)
660 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
661 if ((ret == -ENOSPC || ret == -E2BIG) &&
662 only_mappable_for_reloc(entry->flags))
663 ret = i915_gem_object_pin(obj, vma->vm,
665 flags & ~PIN_MAPPABLE);
669 entry->flags |= __EXEC_OBJECT_HAS_PIN;
671 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
672 ret = i915_gem_object_get_fence(obj);
676 if (i915_gem_object_pin_fence(obj))
677 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
680 if (entry->offset != vma->node.start) {
681 entry->offset = vma->node.start;
685 if (entry->flags & EXEC_OBJECT_WRITE) {
686 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
687 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
694 need_reloc_mappable(struct i915_vma *vma)
696 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
698 if (entry->relocation_count == 0)
704 /* See also use_cpu_reloc() */
705 if (HAS_LLC(vma->obj->base.dev))
708 if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
715 eb_vma_misplaced(struct i915_vma *vma)
717 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
718 struct drm_i915_gem_object *obj = vma->obj;
720 WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
722 if (entry->alignment &&
723 vma->node.start & (entry->alignment - 1))
726 if (entry->flags & EXEC_OBJECT_PINNED &&
727 vma->node.start != entry->offset)
730 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
731 vma->node.start < BATCH_OFFSET_BIAS)
734 /* avoid costly ping-pong once a batch bo ended up non-mappable */
735 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
736 return !only_mappable_for_reloc(entry->flags);
738 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
739 (vma->node.start + vma->node.size - 1) >> 32)
746 i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
747 struct list_head *vmas,
748 struct i915_gem_context *ctx,
751 struct drm_i915_gem_object *obj;
752 struct i915_vma *vma;
753 struct i915_address_space *vm;
754 struct list_head ordered_vmas;
755 struct list_head pinned_vmas;
756 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
759 i915_gem_retire_requests_ring(engine);
761 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
763 INIT_LIST_HEAD(&ordered_vmas);
764 INIT_LIST_HEAD(&pinned_vmas);
765 while (!list_empty(vmas)) {
766 struct drm_i915_gem_exec_object2 *entry;
767 bool need_fence, need_mappable;
		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);
783 if (entry->flags & EXEC_OBJECT_PINNED)
784 list_move_tail(&vma->exec_list, &pinned_vmas);
785 else if (need_mappable) {
786 entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
787 list_move(&vma->exec_list, &ordered_vmas);
789 list_move_tail(&vma->exec_list, &ordered_vmas);
791 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
792 obj->base.pending_write_domain = 0;
794 list_splice(&ordered_vmas, vmas);
795 list_splice(&pinned_vmas, vmas);
797 /* Attempt to pin all of the buffers into the GTT.
798 * This is done in 3 phases:
800 * 1a. Unbind all objects that do not match the GTT constraints for
801 * the execbuffer (fenceable, mappable, alignment etc).
802 * 1b. Increment pin count for already bound objects.
803 * 2. Bind new objects.
804 * 3. Decrement pin count.
 * This avoids unnecessary unbinding of later objects in order to make
807 * room for the earlier objects *unless* we need to defragment.
813 /* Unbind any ill-fitting objects or pin. */
814 list_for_each_entry(vma, vmas, exec_list) {
815 if (!drm_mm_node_allocated(&vma->node))
818 if (eb_vma_misplaced(vma))
819 ret = i915_vma_unbind(vma);
821 ret = i915_gem_execbuffer_reserve_vma(vma,
828 /* Bind fresh objects */
829 list_for_each_entry(vma, vmas, exec_list) {
830 if (drm_mm_node_allocated(&vma->node))
833 ret = i915_gem_execbuffer_reserve_vma(vma, engine,
840 if (ret != -ENOSPC || retry++)
843 /* Decrement pin count for bound objects */
844 list_for_each_entry(vma, vmas, exec_list)
845 i915_gem_execbuffer_unreserve_vma(vma);
847 ret = i915_gem_evict_vm(vm, true);
854 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
855 struct drm_i915_gem_execbuffer2 *args,
856 struct drm_file *file,
857 struct intel_engine_cs *engine,
859 struct drm_i915_gem_exec_object2 *exec,
860 struct i915_gem_context *ctx)
862 struct drm_i915_gem_relocation_entry *reloc;
863 struct i915_address_space *vm;
864 struct i915_vma *vma;
868 unsigned count = args->buffer_count;
870 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
872 /* We may process another execbuffer during the unlock... */
873 while (!list_empty(&eb->vmas)) {
874 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
875 list_del_init(&vma->exec_list);
876 i915_gem_execbuffer_unreserve_vma(vma);
877 i915_gem_object_put(vma->obj);
880 mutex_unlock(&dev->struct_mutex);
883 for (i = 0; i < count; i++)
884 total += exec[i].relocation_count;
886 reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
887 reloc = drm_malloc_ab(total, sizeof(*reloc));
888 if (reloc == NULL || reloc_offset == NULL) {
889 drm_free_large(reloc);
890 drm_free_large(reloc_offset);
891 mutex_lock(&dev->struct_mutex);
896 for (i = 0; i < count; i++) {
897 struct drm_i915_gem_relocation_entry __user *user_relocs;
898 u64 invalid_offset = (u64)-1;
901 user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
903 if (copy_from_user(reloc+total, user_relocs,
904 exec[i].relocation_count * sizeof(*reloc))) {
906 mutex_lock(&dev->struct_mutex);
910 /* As we do not update the known relocation offsets after
911 * relocating (due to the complexities in lock handling),
912 * we need to mark them as invalid now so that we force the
913 * relocation processing next time. Just in case the target
914 * object is evicted and then rebound into its old
915 * presumed_offset before the next execbuffer - if that
916 * happened we would make the mistake of assuming that the
917 * relocations were valid.
919 for (j = 0; j < exec[i].relocation_count; j++) {
920 if (__copy_to_user(&user_relocs[j].presumed_offset,
922 sizeof(invalid_offset))) {
924 mutex_lock(&dev->struct_mutex);
929 reloc_offset[i] = total;
930 total += exec[i].relocation_count;
933 ret = i915_mutex_lock_interruptible(dev);
935 mutex_lock(&dev->struct_mutex);
939 /* reacquire the objects */
941 ret = eb_lookup_vmas(eb, exec, args, vm, file);
945 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
946 ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
951 list_for_each_entry(vma, &eb->vmas, exec_list) {
952 int offset = vma->exec_entry - exec;
953 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
954 reloc + reloc_offset[offset]);
959 /* Leave the user relocations as are, this is the painfully slow path,
960 * and we want to avoid the complication of dropping the lock whilst
961 * having buffers reserved in the aperture and so causing spurious
962 * ENOSPC for random operations.
966 drm_free_large(reloc);
967 drm_free_large(reloc_offset);
972 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
973 struct list_head *vmas)
975 const unsigned other_rings = ~intel_engine_flag(req->engine);
976 struct i915_vma *vma;
979 list_for_each_entry(vma, vmas, exec_list) {
980 struct drm_i915_gem_object *obj = vma->obj;
982 if (obj->active & other_rings) {
983 ret = i915_gem_object_sync(obj, req->engine, &req);
988 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
989 i915_gem_clflush_object(obj, false);
992 /* Unconditionally flush any chipset caches (for streaming writes). */
993 i915_gem_chipset_flush(req->engine->i915);
995 /* Unconditionally invalidate gpu caches and ensure that we do flush
996 * any residual writes from the previous batch.
998 return intel_ring_invalidate_all_caches(req);
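/*
 * i915_gem_execbuffer_move_to_gpu() thus orders the work as: sync against
 * other engines still using an object, clflush anything left in the CPU
 * write domain, flush the chipset caches for streaming writes, and finally
 * invalidate the GPU caches so the batch sees all of the above.
 */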
1002 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1004 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
1007 /* Kernel clipping was a DRI1 misfeature */
1008 if (exec->num_cliprects || exec->cliprects_ptr)
1011 if (exec->DR4 == 0xffffffff) {
1012 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1015 if (exec->DR1 || exec->DR4)
1018 if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1025 validate_exec_list(struct drm_device *dev,
1026 struct drm_i915_gem_exec_object2 *exec,
1029 unsigned relocs_total = 0;
1030 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
1031 unsigned invalid_flags;
1034 /* INTERNAL flags must not overlap with external ones */
1035 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
1037 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
1038 if (USES_FULL_PPGTT(dev))
1039 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
1041 for (i = 0; i < count; i++) {
1042 char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
1043 int length; /* limited by fault_in_pages_readable() */
1045 if (exec[i].flags & invalid_flags)
1048 /* Offset can be used as input (EXEC_OBJECT_PINNED), reject
1049 * any non-page-aligned or non-canonical addresses.
1051 if (exec[i].flags & EXEC_OBJECT_PINNED) {
1052 if (exec[i].offset !=
1053 gen8_canonical_addr(exec[i].offset & I915_GTT_PAGE_MASK))
1056 /* From drm_mm perspective address space is continuous,
1057 * so from this point we're always using non-canonical
1060 exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1063 if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
1066 /* First check for malicious input causing overflow in
1067 * the worst case where we need to allocate the entire
1068 * relocation tree as a single array.
1070 if (exec[i].relocation_count > relocs_max - relocs_total)
1072 relocs_total += exec[i].relocation_count;
1074 length = exec[i].relocation_count *
1075 sizeof(struct drm_i915_gem_relocation_entry);
1077 * We must check that the entire relocation array is safe
1078 * to read, but since we may need to update the presumed
1079 * offsets during execution, check for full write access.
1082 if (!access_ok(VERIFY_WRITE, ptr, length))
1086 if (likely(!i915.prefault_disable)) {
1087 if (fault_in_multipages_readable(ptr, length))
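/*
 * Prefaulting the relocation arrays above happens before struct_mutex is
 * taken, so the later fast relocation path, which runs with pagefaults
 * disabled, is unlikely to hit an unpopulated user page and fall back to the
 * slow path.
 */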
1095 static struct i915_gem_context *
1096 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1097 struct intel_engine_cs *engine, const u32 ctx_id)
1099 struct i915_gem_context *ctx = NULL;
1100 struct i915_ctx_hang_stats *hs;
1102 if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
1103 return ERR_PTR(-EINVAL);
1105 ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
1109 hs = &ctx->hang_stats;
1111 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1112 return ERR_PTR(-EIO);
1119 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1120 struct drm_i915_gem_request *req)
1122 struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
1123 struct i915_vma *vma;
1125 list_for_each_entry(vma, vmas, exec_list) {
1126 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
1127 struct drm_i915_gem_object *obj = vma->obj;
1128 u32 old_read = obj->base.read_domains;
1129 u32 old_write = obj->base.write_domain;
1131 obj->dirty = 1; /* be paranoid */
1132 obj->base.write_domain = obj->base.pending_write_domain;
1133 if (obj->base.write_domain == 0)
1134 obj->base.pending_read_domains |= obj->base.read_domains;
1135 obj->base.read_domains = obj->base.pending_read_domains;
1137 i915_vma_move_to_active(vma, req);
1138 if (obj->base.write_domain) {
1139 i915_gem_request_assign(&obj->last_write_req, req);
1141 intel_fb_obj_invalidate(obj, ORIGIN_CS);
1143 /* update for the implicit flush after a batch */
1144 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1146 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
1147 i915_gem_request_assign(&obj->last_fenced_req, req);
1148 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1149 struct drm_i915_private *dev_priv = engine->i915;
1150 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1151 &dev_priv->mm.fence_list);
1155 trace_i915_gem_object_change_domain(obj, old_read, old_write);
1160 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
1162 /* Unconditionally force add_request to emit a full flush. */
1163 params->engine->gpu_caches_dirty = true;
1165 /* Add a breadcrumb for the completion of the batch buffer */
1166 __i915_add_request(params->request, params->batch_obj, true);
1170 i915_reset_gen7_sol_offsets(struct drm_device *dev,
1171 struct drm_i915_gem_request *req)
1173 struct intel_engine_cs *engine = req->engine;
1174 struct drm_i915_private *dev_priv = to_i915(dev);
1177 if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
1178 DRM_DEBUG("sol reset is gen7/rcs only\n");
1182 ret = intel_ring_begin(req, 4 * 3);
1186 for (i = 0; i < 4; i++) {
1187 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
1188 intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
1189 intel_ring_emit(engine, 0);
1192 intel_ring_advance(engine);
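/*
 * The loop above emits four MI_LOAD_REGISTER_IMM commands that zero
 * GEN7_SO_WRITE_OFFSET(0..3), which is what I915_EXEC_GEN7_SOL_RESET asks
 * for before the batch starts writing streamout data.
 */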
1197 static struct drm_i915_gem_object*
1198 i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
1199 struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1201 struct drm_i915_gem_object *batch_obj,
1202 u32 batch_start_offset,
1206 struct drm_i915_gem_object *shadow_batch_obj;
1207 struct i915_vma *vma;
1210 shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
1211 PAGE_ALIGN(batch_len));
1212 if (IS_ERR(shadow_batch_obj))
1213 return shadow_batch_obj;
1215 ret = i915_parse_cmds(engine,
1224 ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
1228 i915_gem_object_unpin_pages(shadow_batch_obj);
1230 memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1232 vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
1233 vma->exec_entry = shadow_exec_entry;
1234 vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
1235 i915_gem_object_get(shadow_batch_obj);
1236 list_add_tail(&vma->exec_list, &eb->vmas);
1238 shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1240 return shadow_batch_obj;
1243 i915_gem_object_unpin_pages(shadow_batch_obj);
1244 if (ret == -EACCES) /* unhandled chained batch */
1247 return ERR_PTR(ret);
1251 i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1252 struct drm_i915_gem_execbuffer2 *args,
1253 struct list_head *vmas)
1255 struct drm_device *dev = params->dev;
1256 struct intel_engine_cs *engine = params->engine;
1257 struct drm_i915_private *dev_priv = to_i915(dev);
1258 u64 exec_start, exec_len;
1263 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
1267 ret = i915_switch_context(params->request);
1271 WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
1272 "%s didn't clear reload\n", engine->name);
1274 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1275 instp_mask = I915_EXEC_CONSTANTS_MASK;
1276 switch (instp_mode) {
1277 case I915_EXEC_CONSTANTS_REL_GENERAL:
1278 case I915_EXEC_CONSTANTS_ABSOLUTE:
1279 case I915_EXEC_CONSTANTS_REL_SURFACE:
1280 if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
1281 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1285 if (instp_mode != dev_priv->relative_constants_mode) {
1286 if (INTEL_INFO(dev)->gen < 4) {
1287 DRM_DEBUG("no rel constants on pre-gen4\n");
1291 if (INTEL_INFO(dev)->gen > 5 &&
1292 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1293 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1297 /* The HW changed the meaning on this bit on gen6 */
1298 if (INTEL_INFO(dev)->gen >= 6)
1299 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1303 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1307 if (engine == &dev_priv->engine[RCS] &&
1308 instp_mode != dev_priv->relative_constants_mode) {
1309 ret = intel_ring_begin(params->request, 4);
1313 intel_ring_emit(engine, MI_NOOP);
1314 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
1315 intel_ring_emit_reg(engine, INSTPM);
1316 intel_ring_emit(engine, instp_mask << 16 | instp_mode);
1317 intel_ring_advance(engine);
1319 dev_priv->relative_constants_mode = instp_mode;
1322 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1323 ret = i915_reset_gen7_sol_offsets(dev, params->request);
1328 exec_len = args->batch_len;
1329 exec_start = params->batch_obj_vm_offset +
1330 params->args_batch_start_offset;
1333 exec_len = params->batch_obj->base.size;
1335 ret = engine->dispatch_execbuffer(params->request,
1336 exec_start, exec_len,
1337 params->dispatch_flags);
1341 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
1343 i915_gem_execbuffer_move_to_active(vmas, params->request);
1349 * Find one BSD ring to dispatch the corresponding BSD command.
1350 * The ring index is returned.
1353 gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
1355 struct drm_i915_file_private *file_priv = file->driver_priv;
1357 /* Check whether the file_priv has already selected one ring. */
1358 if ((int)file_priv->bsd_ring < 0) {
1359 /* If not, use the ping-pong mechanism to select one. */
1360 mutex_lock(&dev_priv->drm.struct_mutex);
1361 file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
1362 dev_priv->mm.bsd_ring_dispatch_index ^= 1;
1363 mutex_unlock(&dev_priv->drm.struct_mutex);
1366 return file_priv->bsd_ring;
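/*
 * The first BSD submission on a file descriptor picks a ring with a simple
 * global ping-pong counter and the choice then sticks to that fd, spreading
 * clients roughly evenly over the two VCS rings without ever splitting one
 * client's ordered stream between them.
 */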
1369 #define I915_USER_RINGS (4)
1371 static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
1372 [I915_EXEC_DEFAULT] = RCS,
1373 [I915_EXEC_RENDER] = RCS,
1374 [I915_EXEC_BLT] = BCS,
1375 [I915_EXEC_BSD] = VCS,
1376 [I915_EXEC_VEBOX] = VECS
1379 static struct intel_engine_cs *
1380 eb_select_engine(struct drm_i915_private *dev_priv,
1381 struct drm_file *file,
1382 struct drm_i915_gem_execbuffer2 *args)
1384 unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
1385 struct intel_engine_cs *engine;
1387 if (user_ring_id > I915_USER_RINGS) {
1388 DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
1392 if ((user_ring_id != I915_EXEC_BSD) &&
1393 ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1394 DRM_DEBUG("execbuf with non bsd ring but with invalid "
1395 "bsd dispatch flags: %d\n", (int)(args->flags));
1399 if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
1400 unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
1402 if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
1403 bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
1404 } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
1405 bsd_idx <= I915_EXEC_BSD_RING2) {
1406 bsd_idx >>= I915_EXEC_BSD_SHIFT;
1409 DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
1414 engine = &dev_priv->engine[_VCS(bsd_idx)];
1416 engine = &dev_priv->engine[user_ring_map[user_ring_id]];
1419 if (!intel_engine_initialized(engine)) {
1420 DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
1428 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1429 struct drm_file *file,
1430 struct drm_i915_gem_execbuffer2 *args,
1431 struct drm_i915_gem_exec_object2 *exec)
1433 struct drm_i915_private *dev_priv = to_i915(dev);
1434 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1435 struct drm_i915_gem_request *req = NULL;
1437 struct drm_i915_gem_object *batch_obj;
1438 struct drm_i915_gem_exec_object2 shadow_exec_entry;
1439 struct intel_engine_cs *engine;
1440 struct i915_gem_context *ctx;
1441 struct i915_address_space *vm;
1442 struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1443 struct i915_execbuffer_params *params = ¶ms_master;
1444 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1449 if (!i915_gem_check_execbuffer(args))
1452 ret = validate_exec_list(dev, exec, args->buffer_count);
1457 if (args->flags & I915_EXEC_SECURE) {
1459 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
1463 dispatch_flags |= I915_DISPATCH_SECURE;
1465 if (args->flags & I915_EXEC_IS_PINNED)
1466 dispatch_flags |= I915_DISPATCH_PINNED;
1468 engine = eb_select_engine(dev_priv, file, args);
1472 if (args->buffer_count < 1) {
1473 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1477 if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1478 if (!HAS_RESOURCE_STREAMER(dev)) {
1479 DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1482 if (engine->id != RCS) {
1483 DRM_DEBUG("RS is not available on %s\n",
1488 dispatch_flags |= I915_DISPATCH_RS;
1491 /* Take a local wakeref for preparing to dispatch the execbuf as
1492 * we expect to access the hardware fairly frequently in the
1493 * process. Upon first dispatch, we acquire another prolonged
1494 * wakeref that we hold until the GPU has been idle for at least
1497 intel_runtime_pm_get(dev_priv);
1499 ret = i915_mutex_lock_interruptible(dev);
1503 ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
1505 mutex_unlock(&dev->struct_mutex);
1510 i915_gem_context_get(ctx);
1513 vm = &ctx->ppgtt->base;
1517 memset(¶ms_master, 0x00, sizeof(params_master));
1519 eb = eb_create(args);
1521 i915_gem_context_put(ctx);
1522 mutex_unlock(&dev->struct_mutex);
1527 /* Look up object handles */
1528 ret = eb_lookup_vmas(eb, exec, args, vm, file);
1532 /* take note of the batch buffer before we might reorder the lists */
1533 batch_obj = eb_get_batch(eb);
1535 /* Move the objects en-masse into the GTT, evicting if necessary. */
1536 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1537 ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
1542 /* The objects are in their final locations, apply the relocations. */
1544 ret = i915_gem_execbuffer_relocate(eb);
1546 if (ret == -EFAULT) {
1547 ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
1550 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1556 /* Set the pending read domains for the batch buffer to COMMAND */
1557 if (batch_obj->base.pending_write_domain) {
1558 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1563 params->args_batch_start_offset = args->batch_start_offset;
1564 if (i915_needs_cmd_parser(engine) && args->batch_len) {
1565 struct drm_i915_gem_object *parsed_batch_obj;
1567 parsed_batch_obj = i915_gem_execbuffer_parse(engine,
1571 args->batch_start_offset,
1573 drm_is_current_master(file));
1574 if (IS_ERR(parsed_batch_obj)) {
1575 ret = PTR_ERR(parsed_batch_obj);
1580 * parsed_batch_obj == batch_obj means batch not fully parsed:
1581 * Accept, but don't promote to secure.
1584 if (parsed_batch_obj != batch_obj) {
1586 * Batch parsed and accepted:
1588 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1589 * bit from MI_BATCH_BUFFER_START commands issued in
1590 * the dispatch_execbuffer implementations. We
1591 * specifically don't want that set on batches the
1592 * command parser has accepted.
1594 dispatch_flags |= I915_DISPATCH_SECURE;
1595 params->args_batch_start_offset = 0;
1596 batch_obj = parsed_batch_obj;
1600 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1602 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1603 * batch" bit. Hence we need to pin secure batches into the global gtt.
1604 * hsw should have this fixed, but bdw mucks it up again. */
1605 if (dispatch_flags & I915_DISPATCH_SECURE) {
1607 * So on first glance it looks freaky that we pin the batch here
1608 * outside of the reservation loop. But:
1609 * - The batch is already pinned into the relevant ppgtt, so we
1610 * already have the backing storage fully allocated.
1611 * - No other BO uses the global gtt (well contexts, but meh),
1612 * so we don't really have issues with multiple objects not
1613 * fitting due to fragmentation.
1614 * So this is actually safe.
1616 ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
1620 params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
1622 params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
1624 /* Allocate a request for this batch buffer nice and early. */
1625 req = i915_gem_request_alloc(engine, ctx);
1628 goto err_batch_unpin;
1631 ret = i915_gem_request_add_to_client(req, file);
1636 * Save assorted stuff away to pass through to *_submission().
	 * NB: This data should be 'persistent' and not local as it will be
	 * kept around beyond the duration of the IOCTL once the GPU
1639 * scheduler arrives.
1642 params->file = file;
1643 params->engine = engine;
1644 params->dispatch_flags = dispatch_flags;
1645 params->batch_obj = batch_obj;
1647 params->request = req;
1649 ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
1651 i915_gem_execbuffer_retire_commands(params);
1655 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1656 * batch vma for correctness. For less ugly and less fragility this
1657 * needs to be adjusted to also track the ggtt batch vma properly as
1660 if (dispatch_flags & I915_DISPATCH_SECURE)
1661 i915_gem_object_ggtt_unpin(batch_obj);
1664 /* the request owns the ref now */
1665 i915_gem_context_put(ctx);
1668 mutex_unlock(&dev->struct_mutex);
1671 /* intel_gpu_busy should also get a ref, so it will free when the device
1672 * is really idle. */
1673 intel_runtime_pm_put(dev_priv);
1678 * Legacy execbuffer just creates an exec2 list from the original exec object
1679 * list array and passes it to the real function.
1682 i915_gem_execbuffer(struct drm_device *dev, void *data,
1683 struct drm_file *file)
1685 struct drm_i915_gem_execbuffer *args = data;
1686 struct drm_i915_gem_execbuffer2 exec2;
1687 struct drm_i915_gem_exec_object *exec_list = NULL;
1688 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1691 if (args->buffer_count < 1) {
1692 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1696 /* Copy in the exec list from userland */
1697 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1698 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1699 if (exec_list == NULL || exec2_list == NULL) {
1700 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1701 args->buffer_count);
1702 drm_free_large(exec_list);
1703 drm_free_large(exec2_list);
1706 ret = copy_from_user(exec_list,
1707 u64_to_user_ptr(args->buffers_ptr),
1708 sizeof(*exec_list) * args->buffer_count);
1710 DRM_DEBUG("copy %d exec entries failed %d\n",
1711 args->buffer_count, ret);
1712 drm_free_large(exec_list);
1713 drm_free_large(exec2_list);
1717 for (i = 0; i < args->buffer_count; i++) {
1718 exec2_list[i].handle = exec_list[i].handle;
1719 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1720 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1721 exec2_list[i].alignment = exec_list[i].alignment;
1722 exec2_list[i].offset = exec_list[i].offset;
1723 if (INTEL_INFO(dev)->gen < 4)
1724 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1726 exec2_list[i].flags = 0;
1729 exec2.buffers_ptr = args->buffers_ptr;
1730 exec2.buffer_count = args->buffer_count;
1731 exec2.batch_start_offset = args->batch_start_offset;
1732 exec2.batch_len = args->batch_len;
1733 exec2.DR1 = args->DR1;
1734 exec2.DR4 = args->DR4;
1735 exec2.num_cliprects = args->num_cliprects;
1736 exec2.cliprects_ptr = args->cliprects_ptr;
1737 exec2.flags = I915_EXEC_RENDER;
1738 i915_execbuffer2_set_context_id(exec2, 0);
1740 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1742 struct drm_i915_gem_exec_object __user *user_exec_list =
1743 u64_to_user_ptr(args->buffers_ptr);
1745 /* Copy the new buffer offsets back to the user's exec list. */
1746 for (i = 0; i < args->buffer_count; i++) {
1747 exec2_list[i].offset =
1748 gen8_canonical_addr(exec2_list[i].offset);
1749 ret = __copy_to_user(&user_exec_list[i].offset,
1750 &exec2_list[i].offset,
1751 sizeof(user_exec_list[i].offset));
1754 DRM_DEBUG("failed to copy %d exec entries "
1755 "back to user (%d)\n",
1756 args->buffer_count, ret);
1762 drm_free_large(exec_list);
1763 drm_free_large(exec2_list);
1768 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1769 struct drm_file *file)
1771 struct drm_i915_gem_execbuffer2 *args = data;
1772 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1775 if (args->buffer_count < 1 ||
1776 args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1777 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1781 if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
1786 exec2_list = drm_malloc_gfp(args->buffer_count,
1787 sizeof(*exec2_list),
1789 if (exec2_list == NULL) {
1790 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1791 args->buffer_count);
1794 ret = copy_from_user(exec2_list,
1795 u64_to_user_ptr(args->buffers_ptr),
1796 sizeof(*exec2_list) * args->buffer_count);
1798 DRM_DEBUG("copy %d exec entries failed %d\n",
1799 args->buffer_count, ret);
1800 drm_free_large(exec2_list);
1804 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1806 /* Copy the new buffer offsets back to the user's exec list. */
1807 struct drm_i915_gem_exec_object2 __user *user_exec_list =
1808 u64_to_user_ptr(args->buffers_ptr);
1811 for (i = 0; i < args->buffer_count; i++) {
1812 exec2_list[i].offset =
1813 gen8_canonical_addr(exec2_list[i].offset);
1814 ret = __copy_to_user(&user_exec_list[i].offset,
1815 &exec2_list[i].offset,
1816 sizeof(user_exec_list[i].offset));
1819 DRM_DEBUG("failed to copy %d exec entries "
1821 args->buffer_count);
1827 drm_free_large(exec2_list);