/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
static int num_vma_bound(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int count = 0;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (drm_mm_node_allocated(&vma->node))
			count++;
		if (vma->pin_count)
			count++;
	}

	return count;
}
static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Only shmemfs objects are backed by swap */
	if (!obj->base.vm_obj)
		return false;

	/* Only report true if by unbinding the object and putting its pages
	 * we can actually make forward progress towards freeing physical
	 * pages.
	 *
	 * If the pages are pinned for any other reason than being bound
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
	if (obj->pages_pin_count != num_vma_bound(obj))
		return false;

	/* We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->madv == I915_MADV_DONTNEED;
}
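
/*
 * Illustrative note (added commentary, not from the original source): if the
 * only page pins an object holds come from its bound VMAs, pages_pin_count
 * and num_vma_bound() agree and unbinding really can release the backing
 * pages.  Any additional pin from elsewhere (for example a long-lived kernel
 * mapping of the pages) makes the counts differ, in which case unbinding
 * alone cannot free anything and the object is skipped.
 */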
/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps), or the mm core may have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target, unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv->dev);

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);
		while (count < target && !list_empty(phase->list)) {
			struct drm_i915_gem_object *obj;
			struct i915_vma *vma, *v;

			obj = list_first_entry(phase->list,
					       typeof(*obj), global_list);
			list_move_tail(&obj->global_list, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mapping))
				continue;

			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
				continue;

			if (!can_release_pages(obj))
				continue;

			drm_gem_object_reference(&obj->base);

			/* For the unbound phase, this should be a no-op! */
			list_for_each_entry_safe(vma, v,
						 &obj->vma_list, obj_link)
				if (i915_vma_unbind(vma))
					break;

			if (i915_gem_object_put_pages(obj) == 0)
				count += obj->base.size >> PAGE_SHIFT;

			drm_gem_object_unreference(&obj->base);
		}
		list_splice(&still_in_list, phase->list);
	}

	i915_gem_retire_requests(dev_priv->dev);

	return count;
}
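
/*
 * Example (illustrative only, not taken from the driver): a caller holding
 * dev->struct_mutex that wants to drop purgeable objects first might invoke
 * the shrinker along these lines; the 128-page target is an arbitrary value
 * chosen for this sketch:
 *
 *	unsigned long freed;
 *
 *	freed = i915_gem_shrink(dev_priv, 128,
 *				I915_SHRINK_BOUND |
 *				I915_SHRINK_UNBOUND |
 *				I915_SHRINK_PURGEABLE);
 */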
/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @dev_priv: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code that intentionally quiesces the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
	return i915_gem_shrink(dev_priv, -1UL,
			       I915_SHRINK_BOUND |
			       I915_SHRINK_UNBOUND |
			       I915_SHRINK_ACTIVE);
}
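
/*
 * Usage note (illustrative, not code from this file): a last-ditch caller,
 * such as an out-of-memory handler, simply does
 *
 *	freed_pages = i915_gem_shrink_all(dev_priv);
 *
 * and reports the result; the OOM notifier further down in this file is an
 * example of exactly that pattern.
 */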
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;

		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
			return false;

		*unlock = false;
	} else
		*unlock = true;

	return true;
}
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	struct drm_i915_gem_object *obj;
	unsigned long count;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return 0;

	count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
		if (can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!obj->active && can_release_pages(obj))
			count += obj->base.size >> PAGE_SHIFT;
	}

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return count;
}
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *dev_priv =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	struct drm_device *dev = dev_priv->dev;
	unsigned long freed;
	bool unlock;

	if (!i915_gem_shrinker_lock(dev, &unlock))
		return SHRINK_STOP;

	freed = i915_gem_shrink(dev_priv,
				sc->nr_to_scan,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
		freed += i915_gem_shrink(dev_priv,
					 sc->nr_to_scan - freed,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);

	if (unlock)
		mutex_unlock(&dev->struct_mutex);

	return freed;
}
struct shrinker_lock_uninterruptible {
	bool was_interruptible;
	bool unlock;
};
static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
				       struct shrinker_lock_uninterruptible *slu,
				       int timeout_ms)
{
	unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;

	while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
		schedule_timeout_killable(1);
		if (fatal_signal_pending(current))
			return false;
		if (--timeout == 0) {
			pr_err("Unable to lock GPU to purge memory.\n");
			return false;
		}
	}

	slu->was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;
	return true;
}
static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
					 struct shrinker_lock_uninterruptible *slu)
{
	dev_priv->mm.interruptible = slu->was_interruptible;
	if (slu->unlock)
		mutex_unlock(&dev_priv->dev->struct_mutex);
}
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct shrinker_lock_uninterruptible slu;
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	freed_pages = i915_gem_shrink_all(dev_priv);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}

	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned.\n",
			freed_pages, unevictable);
	if (unbound || bound)
		pr_err("%lu and %lu pages still available in the "
		       "bound and unbound GPU page lists.\n",
		       bound, unbound);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *dev_priv =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct shrinker_lock_uninterruptible slu;
	unsigned long freed_pages;

	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
		return NOTIFY_DONE;

	freed_pages = i915_gem_shrink(dev_priv, -1UL,
				      I915_SHRINK_BOUND |
				      I915_SHRINK_UNBOUND |
				      I915_SHRINK_ACTIVE |
				      I915_SHRINK_VMAPS);

	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}
/**
 * i915_gem_shrinker_init - Initialize i915 shrinker
 * @dev_priv: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
{
	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));

	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));

	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}
/**
 * i915_gem_shrinker_cleanup - Clean up i915 shrinker
 * @dev_priv: i915 device
 *
 * This function unregisters the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);
}
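
/*
 * Illustrative sketch (an assumption about the callers, not code from this
 * file): the init/cleanup pair above is intended to be called once at driver
 * load and driver unload respectively, roughly:
 *
 *	i915_gem_shrinker_init(dev_priv);
 *	...
 *	i915_gem_shrinker_cleanup(dev_priv);
 *
 * Note that cleanup unregisters the notifiers in the reverse order of their
 * registration.
 */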