/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
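/*
 * Per-process bookkeeping for userptr objects. An i915_mm_struct is
 * looked up by the owning mm_struct pointer in the dev_priv->mm_structs
 * hash and shared, via its kref, by every userptr object created from
 * that process; it is torn down from a worker so that the final
 * reference on the mm is never dropped while holding struct_mutex.
 */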
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
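/*
 * Per-mm mmu_notifier state. Each userptr object is tracked as an
 * i915_mmu_object covering its [ptr, ptr + size - 1] range in the
 * interval tree below, so invalidate_range_start can find every object
 * overlapping the range being unmapped and queue its cancel work on the
 * dedicated workqueue.
 */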
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct workqueue_struct *wq;
};
struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};
static void wait_rendering(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	int i, n;

	if (!obj->active)
		return;

	n = 0;
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct drm_i915_gem_request *req;

		req = obj->last_read_req[i];
		if (req == NULL)
			continue;

		requests[n++] = i915_gem_request_reference(req);
	}

	mutex_unlock(&dev->struct_mutex);

	for (i = 0; i < n; i++)
		__i915_wait_request(requests[i], false, NULL, NULL);

	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < n; i++)
		i915_gem_request_unreference(requests[i]);
}
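/*
 * Run from mn->wq when an invalidation overlaps this object: cancel any
 * pending gup worker, unbind every VMA, release the backing pages and
 * finally drop the reference taken with kref_get_unless_zero() in
 * invalidate_range_start.
 */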
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		wait_rendering(obj);

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
			WARN_ON(i915_vma_unbind(vma));
		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
}
static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects))
		return;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	flush_workqueue(mn->wq);
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		destroy_workqueue(mn->wq);
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}
static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}
#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}
static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);

	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}
static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}
static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};
#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active()	swiotlb_nr_tbl()
#else
#define swiotlb_active()	0
#endif
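/*
 * With swiotlb active the sg table is built with one scatterlist entry
 * per page; otherwise sg_alloc_table_from_pages() may coalesce
 * physically contiguous pages into larger entries. (Keeping entries
 * page-sized presumably avoids segments that the bounce buffers cannot
 * handle.)
 */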
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), M_DRM, M_WAITOK);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}
static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}
static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmaping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;

		ret = -EFAULT;
		if (atomic_inc_not_zero(&mm->mm_users)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 !obj->userptr.read_only, 0,
					 pvec + pinned, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work == &work->work) {
		if (pinned == npages) {
			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (ret == 0) {
				list_add_tail(&obj->global_list,
					      &to_i915(dev)->mm.unbound_list);
				obj->get_page.sg = obj->pages->sgl;
				obj->get_page.last = 0;
				pinned = 0;
			}
		}
		obj->userptr.work = ERR_PTR(ret);
		if (ret)
			__i915_gem_userptr_set_active(obj, false);
	}

	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}
static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = obj;
	drm_gem_object_reference(&obj->base);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return -EAGAIN;
}
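/*
 * get_pages first tries to pin everything with __get_user_pages_fast()
 * when called from the owning process; anything short of a full pin
 * falls back to the worker above and returns -EAGAIN, so the caller
 * retries once the worker has populated obj->pages (or recorded an
 * error in obj->userptr.work).
 */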
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */
	if (IS_ERR(obj->userptr.work)) {
		/* active flag will have been dropped already by the worker */
		ret = PTR_ERR(obj->userptr.work);
		obj->userptr.work = NULL;
		return ret;
	}
	if (obj->userptr.work)
		/* active flag should still be held for the pending work */
		return -EAGAIN;

	/* Let the mmu-notifier know that we have begun and need cancellation */
	ret = __i915_gem_userptr_set_active(obj, true);
	if (ret)
		return ret;

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL) {
				__i915_gem_userptr_set_active(obj, false);
				return -ENOMEM;
			}
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		ret = pinned, pinned = 0;
	else if (pinned < num_pages)
		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (ret) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return ret;
}
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};
/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf rather than userptr!
 */
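/*
 * Illustrative userspace sketch (not part of this file): the ioctl is
 * normally reached through libdrm, but a raw call looks roughly like the
 * following. Field and ioctl names come from i915_drm.h; the buffer size
 * and the use_handle() helper are made up for the example.
 *
 *	struct drm_i915_gem_userptr arg = {};
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, 4096);	// rule 1: page aligned
 *	arg.user_ptr = (uintptr_t)ptr;
 *	arg.user_size = 4096;
 *	arg.flags = 0;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_handle(arg.handle);		// hypothetical helper
 */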
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
int
i915_gem_init_userptr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockinit(&dev_priv->mm_lock, "i915dmm", 0, LK_CANRECURSE);
	hash_init(dev_priv->mm_structs);

	return 0;
}