/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
/*-
 * Copyright (c) 2011 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <linux/types.h>
#include <linux/module.h>

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
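/*
 * Illustrative sketch only (not part of this file): from userspace the
 * handle "syscalls" are plain ioctls on the DRM device fd.  For example,
 * closing a handle mimics close(2) via DRM_IOCTL_GEM_CLOSE from <drm/drm.h>:
 *
 *	struct drm_gem_close close_args = { .handle = handle };
 *	if (ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args) != 0)
 *		err(1, "DRM_IOCTL_GEM_CLOSE");
 */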
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */
/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
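/*
 * Worked example, assuming the common PAGE_SHIFT of 12 on a 64-bit build:
 * START = (0xFFFFFFFFUL >> 12) + 1 = 0x100000 pages, i.e. fake offsets
 * begin at byte offset 4GB, safely above any pgoff a 32-bit-sized file
 * could produce, and SIZE = 0xFFFFF * 16 pages gives roughly a 64GB
 * window of fake offsets to hand out.
 */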
/**
 * Initialize the GEM device fields
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	lockinit(&dev->object_name_lock, "objnam", 0, LK_CANRECURSE);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 12)) {
		kfree(mm);
		return -ENOMEM;
	}

	mm->idxunr = new_unrhdr(0, DRM_GEM_MAX_IDX, NULL);
	drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
		    DRM_FILE_PAGE_OFFSET_SIZE);
	drm_vma_offset_manager_init(&mm->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}
void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);

	drm_vma_offset_manager_destroy(&mm->vma_manager);
	delete_unrhdr(mm->idxunr);
	kfree(mm);
	dev->mm_private = NULL;
}
/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->vm_obj = default_pager_alloc(NULL, size,
	    VM_PROT_READ | VM_PROT_WRITE, 0);

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
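/*
 * A minimal sketch of the expected driver-side usage, with a hypothetical
 * driver object (struct foo_bo) embedding struct drm_gem_object as its
 * first member; the size handed in must be page-aligned per the BUG_ON:
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		// driver-private state
 *	};
 *
 *	struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	if (bo == NULL)
 *		return -ENOMEM;
 *	ret = drm_gem_object_init(dev, &bo->base, round_page(user_size));
 *	if (ret) {
 *		kfree(bo);
 *		return ret;
 *	}
 */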
/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->vm_obj = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	if (obj->import_attach) {
		drm_prime_remove_buf_handle(&filp->prime,
					    obj->import_attach->dmabuf);
	}
	if (obj->dma_buf) {
		/*
		 * Note: obj->dma_buf can't disappear as long as we still hold a
		 * handle reference in obj->handle_count.
		 */
		drm_prime_remove_buf_handle(&filp->prime,
					    obj->dma_buf);
	}
}
/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}
static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}
static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(atomic_read(&obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	if (atomic_dec_and_test(&obj->handle_count)) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	drm_gem_object_unreference_unlocked(obj);
}
/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	lockmgr(&filp->table_lock, LK_EXCLUSIVE);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		lockmgr(&filp->table_lock, LK_RELEASE);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	lockmgr(&filp->table_lock, LK_RELEASE);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	lockmgr(&file_priv->table_lock, LK_EXCLUSIVE);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	lockmgr(&file_priv->table_lock, LK_RELEASE);
	if (ret == -EAGAIN)
		goto again;
	else if (ret)
		return ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
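/*
 * Typical caller pattern (sketch, continuing the hypothetical foo_bo
 * example): a driver's "create" ioctl makes the object, publishes a
 * handle, then drops its own reference so the handle holds the only one:
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	drm_gem_object_unreference_unlocked(&bo->base);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */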
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_hash_item *list;

	if (!obj->on_map)
		return;
	list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, list);
	free_unr(mm->idxunr, list->key);
	obj->on_map = false;

	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	int ret;

	if (obj->on_map)
		return 0;

	obj->map_list.key = alloc_unr(mm->idxunr);
	ret = drm_ht_insert_item(&mm->offset_hash, &obj->map_list);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		free_unr(mm->idxunr, obj->map_list.key);
		return ret;
	}
	obj->on_map = true;

	return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
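/*
 * Sketch of the userspace side: a driver-specific ioctl (a hypothetical
 * FOO_GEM_MMAP here) returns the fake offset produced above, which is
 * then fed straight to mmap(2) on the DRM device fd:
 *
 *	uint64_t offset;	// filled in by the driver's mmap ioctl
 *	void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, offset);
 */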
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	lockmgr(&filp->table_lock, LK_EXCLUSIVE);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		lockmgr(&filp->table_lock, LK_RELEASE);
		return NULL;
	}

	drm_gem_object_reference(obj);

	lockmgr(&filp->table_lock, LK_RELEASE);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
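/*
 * Sketch of an ioctl handler using the lookup; every successful lookup
 * must be balanced by an unreference once the handler is done with the
 * object:
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	// ... operate on obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */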
/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	return (drm_gem_handle_delete(file_priv, args->handle));
}
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	lockmgr(&dev->object_name_lock, LK_EXCLUSIVE);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		lockmgr(&dev->object_name_lock, LK_RELEASE);

		if (ret == -EAGAIN)
			goto again;
		else if (ret)
			goto err;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		lockmgr(&dev->object_name_lock, LK_RELEASE);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	lockmgr(&dev->object_name_lock, LK_EXCLUSIVE);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	lockmgr(&dev->object_name_lock, LK_RELEASE);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
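/*
 * Userspace sketch of the flink/open pair: process A names an object,
 * process B (holding its own DRM fd) opens that name to get a fresh
 * handle and the object size (structs from <drm/drm.h>):
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);	// flink.name is global
 *
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op);		// op.handle, op.size
 */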
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	lockinit(&file_private->table_lock, "fptab", 0, LK_CANRECURSE);
}
/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	/*
	 * obj->vm_obj can be NULL for private gem objects.
	 */
	vm_object_deallocate(obj->vm_obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
static struct drm_gem_object *
drm_gem_object_from_offset(struct drm_device *dev, vm_ooffset_t offset)
{
	struct drm_gem_object *obj;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_hash_item *hash;

	if ((offset & DRM_GEM_MAPPING_MASK) != DRM_GEM_MAPPING_KEY)
		return (NULL);
	offset &= ~DRM_GEM_MAPPING_KEY;

	if (drm_ht_find_item(&mm->offset_hash, DRM_GEM_MAPPING_IDX(offset),
	    &hash) != 0) {
		return (NULL);
	}

	obj = container_of(hash, struct drm_gem_object, map_list);
	return (obj);
}
int
drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct drm_gem_object *gem_obj;
	struct vm_object *vm_obj;

	DRM_LOCK(dev);
	gem_obj = drm_gem_object_from_offset(dev, *offset);
	if (gem_obj == NULL) {
		DRM_UNLOCK(dev);
		return (ENODEV);
	}
	drm_gem_object_reference(gem_obj);
	DRM_UNLOCK(dev);

	vm_obj = cdev_pager_allocate(gem_obj, OBJT_MGTDEVICE,
	    dev->driver->gem_pager_ops, size, nprot,
	    DRM_GEM_MAPPING_MAPOFF(*offset), curthread->td_ucred);
	if (vm_obj == NULL) {
		drm_gem_object_unreference_unlocked(gem_obj);
		return (EINVAL);
	}
	*offset = DRM_GEM_MAPPING_MAPOFF(*offset);
	*obj_res = vm_obj;
	return (0);
}
void
drm_gem_pager_dtr(void *handle)
{
	struct drm_gem_object *obj;
	struct drm_device *dev;

	obj = handle;
	dev = obj->dev;

	DRM_LOCK(dev);
	drm_gem_free_mmap_offset(obj);
	drm_gem_object_unreference(obj);
	DRM_UNLOCK(dev);
}