/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
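
/*
 * This file implements eviction of buffer objects from a GPU address
 * space: from freeing just enough room for a single new binding
 * (i915_gem_evict_something) up to emptying an address space entirely
 * (i915_gem_evict_vm, i915_gem_evict_everything).
 */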
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->obj->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}
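
/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @dev: drm device
 * @vm: address space to evict from
 * @min_size: minimum size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level of the new binding
 * @mappable: if the free space must lie within the mappable range of the GGTT
 * @nonblocking: if true, fail rather than wait upon active objects
 *
 * Scans @vm in LRU order for a run of vmas that, once evicted, would leave a
 * hole satisfying the given size, alignment and cache-level constraints.
 * Returns 0 on success, -ENOSPC if no suitable hole can be freed, or -EAGAIN
 * if the caller should retry once pending pageflips have completed.
 */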
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 bool mappable, bool nonblocking)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;
	int pass = 0;

	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
	if (mappable) {
		BUG_ON(!i915_is_ggtt(vm));
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level, 0,
					    dev_priv->gtt.mappable_end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
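
	/*
	 * Note: the drm_mm scan is stateful. Every node added with
	 * drm_mm_scan_add_block() must be removed again with
	 * drm_mm_scan_remove_block() before any other drm_mm operation, so
	 * each candidate vma is also tracked on unwind_list.
	 */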

search_again:
	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (nonblocking)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contents,
	 * or pending flips?
	 */
	if (nonblocking)
		return -ENOSPC;

	/* Only idle the GPU and repeat the search once */
	if (pass++ == 0) {
		ret = i915_gpu_idle(dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev);
		goto search_again;
	}

	/* If we still have pending pageflip completions, drop
	 * back to userspace to give our workqueues time to
	 * acquire our locks and unpin the old scanouts.
	 */
	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to be evicted objects on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}
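
	/*
	 * Each entry on eviction_list now holds an extra reference on its
	 * object (taken above). i915_vma_unbind() may destroy the vma, so
	 * cache the obj pointer first and drop the reference only after
	 * the unbind has completed.
	 */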
	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}

/**
 * i915_gem_evict_vm - Try to free up VM space
 *
 * @vm: Address space to evict from
 * @do_idle: Boolean directing whether to idle first.
 *
 * VM eviction is about freeing up virtual address space. If one wants fine
 * grained eviction, they should use i915_gem_evict_something() instead. In
 * terms of freeing up actual system memory, this function may not accomplish
 * the desired result. An object may be shared across multiple address spaces,
 * and this function does not guarantee that such objects are actually freed.
 *
 * Using do_idle will result in a more complete eviction because it retires and
 * inactivates current BOs.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		ret = i915_gpu_idle(vm->dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(vm->dev);
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
		if (vma->obj->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}
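
/**
 * i915_gem_evict_everything - Try to evict all objects
 * @dev: drm device
 *
 * Idles the GPU, retires all outstanding requests and then unbinds every
 * unpinned object in every address space by calling i915_gem_evict_vm().
 * Returns -ENOSPC if there was nothing to evict.
 */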
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_address_space *vm;
	bool lists_empty = true;
	int ret;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		lists_empty = (list_empty(&vm->inactive_list) &&
			       list_empty(&vm->active_list));
		if (!lists_empty)
			break;
	}

	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
		WARN_ON(i915_gem_evict_vm(vm, false));

	return 0;
}