/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"

static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
{
	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;

	case MUTEX_TRYLOCK_FAILED:
		*unlock = false;
		preempt_disable();
		do {
			cpu_relax();
			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
				*unlock = true;
				break;
			}
		} while (!need_resched());
		preempt_enable();
		return *unlock;

	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;
		return true;
	}

	BUG();
}

static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
{
	if (!unlock)
		return;

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
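
/*
 * Illustrative sketch (not part of the original file): every consumer of
 * shrinker_lock()/shrinker_unlock() in this file follows the same pattern,
 * threading the "unlock" cookie through so that a recursively-held
 * struct_mutex is never dropped by mistake:
 *
 *	bool unlock;
 *
 *	if (!shrinker_lock(dev_priv, &unlock))
 *		return 0;
 *	... reclaim work under struct_mutex ...
 *	shrinker_unlock(dev_priv, unlock);
 */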

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
		return false;

	/* If any vma are "permanently" pinned, it will prevent us from
	 * reclaiming the obj->mm.pages. We only allow scanout objects to claim
	 * a permanent pin, along with a few others like the context objects.
	 * To simplify the scan, and to avoid walking the list of vma under the
	 * object, we just check the count of its permanent pins.
	 */
	if (READ_ONCE(obj->pin_global))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
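
/*
 * Aside (illustrative, not from this file): obj->mm.madv becomes
 * I915_MADV_DONTNEED when userspace marks the object purgeable via the
 * madvise ioctl, roughly:
 *
 *	struct drm_i915_gem_madvise arg = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *
 * Such objects are reclaimable even without swap, since their contents may
 * simply be discarded.
 */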

static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
	return !i915_gem_object_has_pages(obj);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps), or the mm core might have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	unsigned long scanned = 0;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	/*
	 * When shrinking the active list, also consider active contexts.
	 * Active contexts are pinned until they are retired, and so can
	 * not be simply unbound to retire and unpin their pages. To shrink
	 * the contexts, we must wait until the gpu is idle.
	 *
	 * We don't care about errors here; if we cannot wait upon the GPU,
	 * we will free as much as we can and hope to get a second chance.
	 */
	if (flags & I915_SHRINK_ACTIVE)
		i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: the final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		lockmgr(&dev_priv->mm.obj_lock, LK_EXCLUSIVE);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!can_release_pages(obj))
				continue;

			lockmgr(&dev_priv->mm.obj_lock, LK_RELEASE);

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!i915_gem_object_has_pages(obj)) {
					__i915_gem_object_invalidate(obj);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
			scanned += obj->base.size >> PAGE_SHIFT;

			lockmgr(&dev_priv->mm.obj_lock, LK_EXCLUSIVE);
		}
		list_splice_tail(&still_in_list, phase->list);
		lockmgr(&dev_priv->mm.obj_lock, LK_RELEASE);
	}

	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	shrinker_unlock(dev_priv, unlock);

	if (nr_scanned)
		*nr_scanned += scanned;

	return count;
}
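
/*
 * Illustrative sketch (not part of the original file): a caller that only
 * wants to reclaim purgeable, unbound backing storage - say, up to 128
 * pages - could invoke the interface above as:
 *
 *	unsigned long freed;
 *
 *	freed = i915_gem_shrink(dev_priv, 128, NULL,
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 *
 * Passing NULL for @nr_scanned is allowed; it is only filled in when the
 * caller (such as the mm shrinker core) wants scan statistics back.
 */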

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the gpu or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	intel_runtime_pm_get(dev_priv);
	freed = i915_gem_shrink(dev_priv, -1UL, NULL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	intel_runtime_pm_put(dev_priv);

	return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_i915_gem_object *obj;
	unsigned long count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link)
		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	spin_unlock(&dev_priv->mm.obj_lock);

	return count;
}
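
/*
 * Note (editorial, hedged): the core mm invokes the two shrinker callbacks
 * in a count-then-scan pattern, roughly:
 *
 *	freeable = shrinker->count_objects(shrinker, sc);
 *	if (freeable) {
 *		sc->nr_to_scan = batch;	// derived from freeable and pressure
 *		freed = shrinker->scan_objects(shrinker, sc);
 *	}
 *
 * so i915_gem_shrinker_count() above must stay cheap, while the actual
 * reclaim happens in i915_gem_shrinker_scan() below.
 */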

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;
	bool unlock;

	sc->nr_scanned = 0;

	if (!shrinker_lock(dev_priv, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - sc->nr_scanned,
					 &sc->nr_scanned,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (freed < sc->nr_to_scan && current_is_kswapd()) {
		intel_runtime_pm_get(dev_priv);
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - sc->nr_scanned,
					 &sc->nr_scanned,
					 I915_SHRINK_ACTIVE |
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
		intel_runtime_pm_put(dev_priv);
	}

	shrinker_unlock(dev_priv, unlock);

	return sc->nr_scanned ? freed : SHRINK_STOP;
}

static bool
shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
			      int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
		    shrinker_lock(dev_priv, unlock))
			break;

		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;

		if (time_after(jiffies, timeout)) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	} while (1);

	return true;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;

	freed_pages = i915_gem_shrink_all(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	bool unlock;
	int ret;

	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	if (ret)
		goto out;

	intel_runtime_pm_get(dev_priv);
	freed_pages += i915_gem_shrink(dev_priv, -1UL, NULL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
	intel_runtime_pm_put(dev_priv);

	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
				 &dev_priv->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;

		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
	shrinker_unlock(dev_priv, unlock);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}