/**************************************************************************
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 **************************************************************************/
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_bo.c 248060 2013-03-08 18:11:02Z dumbbell $
#define pr_fmt(fmt) "[TTM] " fmt
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/wait.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob);
MALLOC_DEFINE(M_TTM_BO, "ttm_bo", "TTM Buffer Objects");
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	kprintf(" has_type: %d\n", man->has_type);
	kprintf(" use_type: %d\n", man->use_type);
	kprintf(" flags: 0x%08X\n", man->flags);
	kprintf(" gpu_offset: 0x%08lX\n", man->gpu_offset);
	kprintf(" size: %ju\n", (uintmax_t)man->size);
	kprintf(" available_caching: 0x%08X\n", man->available_caching);
	kprintf(" default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
	kprintf("No space for %p (%lu pages, %luK, %luM)\n",
	    bo, bo->mem.num_pages, bo->mem.size >> 10,
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
		kprintf(" placement[%d]=0x%08X (%d)\n",
		    i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
static ssize_t ttm_bo_global_show(struct ttm_bo_global *glob,
	return snprintf(buffer, PAGE_SIZE, "%lu\n",
	    (unsigned long) atomic_read(&glob->bo_count));
static inline uint32_t ttm_bo_type_flags(unsigned type)
static void ttm_bo_release_list(struct kref *list_kref)
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;
	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));
	ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
	while (ttm_bo_is_reserved(bo)) {
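		/*
		 * Note (added): on DragonFly, lksleep() atomically drops the
		 * lockmgr lock passed to it (here bo->glob->lru_lock) while
		 * sleeping and reacquires it before returning, so the LRU
		 * lock is not held across the wait itself.
		 */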
		ret = -lksleep(bo, &bo->glob->lru_lock, 0, wmsg, 0);
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	BUG_ON(!ttm_bo_is_reserved(bo));
	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		BUG_ON(!list_empty(&bo->lru));
		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);
		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
			 bool no_wait, bool use_sequence, uint32_t sequence)
	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		 * Deadlock avoidance for multi-bo reserving.
		if (use_sequence && bo->seq_valid) {
			 * We've already reserved this one.
			if (unlikely(sequence == bo->val_seq))
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			if (unlikely(sequence - bo->val_seq < (1U << 31)))
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		bool wake_up = false;
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		if (unlikely((bo->val_seq - sequence < (1U << 31))
		 * In the worst case with memory ordering these values can be
		 * seen in the wrong order. However, since we call wake_up_all
		 * in that case, this will hopefully not pose a problem,
		 * and the worst case would only cause someone to accidentally
		 * hit -EAGAIN in ttm_bo_reserve when they see an old value of
		 * val_seq. This can only happen if seq_valid was written
		 * before val_seq was, and just means some slightly increased
		 * CPU usage.
		bo->val_seq = sequence;
		bo->seq_valid = true;
			wake_up_all(&bo->event_queue);
		bo->seq_valid = false;
EXPORT_SYMBOL(ttm_bo_reserve);
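/*
 * Illustrative sketch (added, not part of the original file): the
 * sequence-based reservation above is what execbuf-style helpers build on
 * when reserving a whole list of buffers without deadlocking.  A driver-side
 * loop might look roughly like this, where 'val_seq' is a per-submission
 * sequence number and the other identifiers are hypothetical:
 *
 *	list_for_each_entry(entry, &val_list, head) {
 *		ret = ttm_bo_reserve(entry->bo, true, false, true, val_seq);
 *		if (ret == -EAGAIN) {
 *			// an older sequence owns a buffer we need: back off,
 *			// release what we hold and retry via the slowpath
 *		}
 *	}
 *
 * Two threads that reserve overlapping buffer sets in opposite orders thus
 * always resolve in favour of the older sequence number instead of
 * deadlocking.
 */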
static void ttm_bo_ref_bug(struct kref *list_kref)
void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
	kref_sub(&bo->list_kref, count,
	    (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool no_wait, bool use_sequence, uint32_t sequence)
	struct ttm_bo_global *glob = bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
	if (likely(ret == 0)) {
		put_count = ttm_bo_del_from_lru(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_list_ref_sub(bo, put_count, true);
		lockmgr(&glob->lru_lock, LK_RELEASE);
int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
				  bool interruptible, uint32_t sequence)
	bool wake_up = false;
	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		WARN_ON(bo->seq_valid && sequence == bo->val_seq);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
	if ((bo->val_seq - sequence < (1U << 31)) || !bo->seq_valid)
	 * Wake up waiters that may need to recheck for deadlock,
	 * if we decreased the sequence number.
	bo->val_seq = sequence;
	bo->seq_valid = true;
		wake_up_all(&bo->event_queue);
int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
			    bool interruptible, uint32_t sequence)
	struct ttm_bo_global *glob = bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
		put_count = ttm_bo_del_from_lru(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_list_ref_sub(bo, put_count, true);
		lockmgr(&glob->lru_lock, LK_RELEASE);
EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
void ttm_bo_unreserve(struct ttm_buffer_object *bo)
	struct ttm_bo_global *glob = bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_bo_unreserve_locked(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);
EXPORT_SYMBOL(ttm_bo_unreserve);
 * Call bo->mutex locked.
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	uint32_t page_flags = 0;
	TTM_ASSERT_LOCKED(&bo->mutex);
	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;
	case ttm_bo_type_device:
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
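		/* fall through to ttm_bo_type_kernel */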
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
		    page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
		    page_flags | TTM_PAGE_FLAG_SG,
		    glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
		bo->ttm->sg = bo->sg;
		kprintf("[TTM] Illegal buffer object type\n");
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	 * Create and bind a ttm if required.
	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);
	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			bdev->driver->move_notify(bo, mem);
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			kprintf("[TTM] Can not flush read caches\n");
	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
	ttm_bo_mem_put(bo, &bo->mem);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	 * Since the final reference to this bo may not be dropped by
	 * the current task we have to put a memory barrier here to make
	 * sure the changes done in this function are always visible.
	 * This function only needs protection against the final kref_put.
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		put_count = ttm_bo_del_from_lru(bo);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ttm_bo_cleanup_memtype_use(bo);
		ttm_bo_list_ref_sub(bo, put_count, true);
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	lockmgr(&glob->lru_lock, LK_RELEASE);
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	schedule_delayed_work(&bdev->wq,
	    ((hz / 100) < 1) ? 1 : hz / 100);
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 * Must be called with lru_lock and reservation held; this function
 * will drop both before returning.
 * @interruptible Any sleeps should occur interruptibly.
 * @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, false, true);
	if (ret && !no_wait_gpu) {
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		ret = ttm_bo_wait(bo, false, false, true);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
			lockmgr(&glob->lru_lock, LK_RELEASE);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		lockmgr(&glob->lru_lock, LK_RELEASE);
	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_bo_cleanup_memtype_use(bo);
	ttm_bo_list_ref_sub(bo, put_count, true);
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
	entry = list_first_entry(&bdev->ddestroy,
	    struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);
		struct ttm_buffer_object *nentry = NULL;
		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
			    struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
		if (remove_all && ret) {
			ret = ttm_bo_reserve_nolru(entry, false, false,
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
			lockmgr(&glob->lru_lock, LK_RELEASE);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
		if (list_empty(&entry->ddestroy))
	lockmgr(&glob->lru_lock, LK_RELEASE);
		kref_put(&entry->list_kref, ttm_bo_release_list);
static void ttm_bo_delayed_workqueue(struct work_struct *work)
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);
	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
		    ((hz / 100) < 1) ? 1 : hz / 100);
 * NOTE: bdev->vm_lock already held on call; this function releases it.
static void ttm_bo_release(struct kref *kref)
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
	if (atomic_read(&bo->kref.refcount) > 0) {
		lockmgr(&bdev->vm_lock, LK_RELEASE);
	if (likely(bo->vm_node != NULL)) {
		RB_REMOVE(ttm_bo_device_buffer_objects,
		    &bdev->addr_space_rb, bo);
		drm_mm_put_block(bo->vm_node);
	 * Should we clean up our implied list_kref? Because ttm_bo_release()
	 * can be called reentrantly due to races (this may not be true any
	 * more with the lock management changes in the deref), it is possible
	 * to get here twice, but there's only one list_kref ref to drop and
	 * in the other path 'bo' can be kfree()d by another thread the
	 * instant we release our lock.
	release_active = test_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
	if (release_active) {
		clear_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
		lockmgr(&bdev->vm_lock, LK_RELEASE);
		ttm_mem_io_lock(man, false);
		ttm_mem_io_free_vm(bo);
		ttm_mem_io_unlock(man);
		ttm_bo_cleanup_refs_or_queue(bo);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		lockmgr(&bdev->vm_lock, LK_RELEASE);
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;
	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	if (kref_put(&bo->kref, ttm_bo_release) == 0)
		lockmgr(&bdev->vm_lock, LK_RELEASE);
EXPORT_SYMBOL(ttm_bo_unref);
int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
	return cancel_delayed_work_sync(&bdev->wq);
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
		schedule_delayed_work(&bdev->wq,
		    ((hz / 100) < 1) ? 1 : hz / 100);
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTART) {
			kprintf("[TTM] Failed to expire sync object before buffer eviction\n");
	BUG_ON(!ttm_bo_is_reserved(bo));
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
		if (ret != -ERESTART) {
			kprintf("[TTM] Failed to find memory space for buffer 0x%p eviction\n",
			ttm_bo_mem_space_debug(bo, &placement);
	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
		if (ret != -ERESTART)
			kprintf("[TTM] Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		lockmgr(&glob->lru_lock, LK_RELEASE);
	kref_get(&bo->list_kref);
	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
		kref_put(&bo->list_kref, ttm_bo_release_list);
	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_bo_list_ref_sub(bo, put_count, true);
	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
		(*man->func->put_node)(man, mem);
EXPORT_SYMBOL(ttm_bo_mem_put);
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement,
				  struct ttm_mem_reg *mem,
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
		ret = ttm_mem_evict_first(bdev, mem_type,
		    interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
	if (mem->mm_node == NULL)
	mem->mem_type = mem_type;
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
	 * Keep current caching if possible.
	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;
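/*
 * Worked example (added for clarity, not in the original source): if the
 * buffer currently sits in cached memory (cur_placement has
 * TTM_PL_FLAG_CACHED) and the proposed placement allows both
 * TTM_PL_FLAG_CACHED and TTM_PL_FLAG_WC, the first test above keeps
 * TTM_PL_FLAG_CACHED, so no cache-attribute change is needed.  Only when the
 * current caching mode is not allowed does the code fall back to the
 * manager's default caching and then to the generic cached -> write-combined
 * -> uncached preference order.
 */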
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
	if ((proposed_placement & man->available_caching) == 0)
	cur_flags |= (proposed_placement & man->available_caching);
	*masked_placement = cur_flags;
 * Creates space for memory region @mem according to its type.
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
		man = &bdev->man[mem_type];
		type_ok = ttm_bo_mt_compatible(man,
		    placement->placement[i],
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
		 * Merge the access and other non-mapping-related flag bits
		 * from the memory placement flags into the current flags.
		ttm_flag_masked(&cur_flags, placement->placement[i],
		    ~TTM_PL_MASK_MEMTYPE);
		if (mem_type == TTM_PL_SYSTEM)
		if (man->has_type && man->use_type) {
			ret = (*man->func->get_node)(man, bo, placement, mem);
	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
		man = &bdev->man[mem_type];
		if (!ttm_bo_mt_compatible(man,
		    placement->busy_placement[i],
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
		 * Merge the access and other non-mapping-related flag bits
		 * from the memory placement flags into the current flags.
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
		    ~TTM_PL_MASK_MEMTYPE);
		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
		    interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
		if (ret == -ERESTART)
			has_erestartsys = true;
	ret = (has_erestartsys) ? -ERESTART : -ENOMEM;
EXPORT_SYMBOL(ttm_bo_mem_space);
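/*
 * Illustrative sketch (added, not from the original driver): a caller
 * normally feeds ttm_bo_mem_space() / ttm_bo_validate() a two-level
 * placement list: 'placement' holds the preferred domains that are tried
 * without eviction first, and 'busy_placement' holds the fallbacks that may
 * force eviction.  The flag values below are only an example:
 *
 *	uint32_t prefer[] = { TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED };
 *	uint32_t busy[]   = { TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED };
 *	struct ttm_placement place = {
 *		.fpfn = 0, .lpfn = 0,
 *		.num_placement = 1,      .placement = prefer,
 *		.num_busy_placement = 1, .busy_placement = busy,
 *	};
 *
 * The first loop above walks 'placement'; the second walks 'busy_placement'
 * and calls ttm_bo_mem_force_space() to evict until the request fits.
 */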
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       struct ttm_placement *placement,
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;
	BUG_ON(!ttm_bo_is_reserved(bo));
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	 * Determine where to move the buffer.
	ret = ttm_bo_mem_space(bo, placement, &mem,
	    interruptible, no_wait_gpu);
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
	    interruptible, no_wait_gpu);
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
		     TTM_PL_MASK_CACHING) &&
		    (placement->placement[i] & mem->placement &
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
	BUG_ON(!ttm_bo_is_reserved(bo));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
		    (placement->lpfn - placement->fpfn) < bo->num_pages)
	 * Check whether we need to move buffer.
	ret = ttm_bo_mem_compat(placement, &bo->mem);
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
		 * Merge the access and other non-mapping-related flag bits
		 * from the compatible memory placement flags into the active flags.
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
		    ~TTM_PL_MASK_MEMTYPE);
	 * We might need to add a TTM.
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
EXPORT_SYMBOL(ttm_bo_validate);
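/*
 * Illustrative usage (added): with the buffer reserved, a driver moves it by
 * validating it against the desired placement; 'place' is a placement such
 * as the one sketched after ttm_bo_mem_space() above:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (ret == 0) {
 *		ret = ttm_bo_validate(bo, &place, true, false);
 *		ttm_bo_unreserve(bo);
 *	}
 *
 * If the buffer is already compatible, ttm_bo_mem_compat() short-circuits
 * the move and only the placement flags are updated.
 */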
int ttm_bo_check_placement(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement)
	BUG_ON((placement->fpfn || placement->lpfn) &&
	    (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		struct vm_object *persistent_swap_storage,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
		kprintf("[TTM] Out of kernel memory\n");
			kfree(bo, M_TTM_BO);
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		kprintf("[TTM] Illegal buffer object size\n");
			kfree(bo, M_TTM_BO);
		ttm_mem_global_free(mem_glob, acc_size);
	bo->destroy = destroy;
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	/*bzero(&bo->vm_rb, sizeof(bo->vm_rb));*/
	bo->glob = bdev->glob;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	atomic_inc(&bo->glob->bo_count);
	 * Mirror ref from kref_init() for list_kref.
	set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &bo->priv_flags);
	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg) {
		ret = ttm_bo_setup_vm(bo);
	ret = ttm_bo_validate(bo, placement, interruptible, false);
		ttm_bo_unreserve(bo);
	ttm_bo_unreserve(bo);
EXPORT_SYMBOL(ttm_bo_init);
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
EXPORT_SYMBOL(ttm_bo_acc_size);
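/*
 * Worked example (added): for a 1 MiB buffer with 4 KiB pages, npages = 256,
 * so the accounted size is roughly ttm_round_pot(struct_size)
 * + PAGE_ALIGN(256 * sizeof(void *)) (one page for the page-pointer array)
 * + ttm_round_pot(sizeof(struct ttm_tt)).  The DMA variant below adds a
 * second per-page array for bus addresses (sizeof(dma_addr_t) per page).
 */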
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
EXPORT_SYMBOL(ttm_bo_dma_acc_size);
int ttm_bo_create(struct ttm_bo_device *bdev,
		  enum ttm_bo_type type,
		  struct ttm_placement *placement,
		  uint32_t page_alignment,
		  struct vm_object *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
	struct ttm_buffer_object *bo;
	bo = kmalloc(sizeof(*bo), M_TTM_BO, M_WAITOK | M_ZERO);
	if (unlikely(bo == NULL))
	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
	    interruptible, persistent_swap_storage, acc_size,
	if (likely(ret == 0))
EXPORT_SYMBOL(ttm_bo_create);
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type, bool allow_errors)
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	 * Can't use standard list traversal since we're unlocking.
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	while (!list_empty(&man->lru)) {
		lockmgr(&glob->lru_lock, LK_RELEASE);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
				kprintf("[TTM] Cleanup eviction failed\n");
		lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	lockmgr(&glob->lru_lock, LK_RELEASE);
int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
	struct ttm_mem_type_manager *man;
	if (mem_type >= TTM_NUM_MEM_TYPES) {
		kprintf("[TTM] Illegal memory type %d\n", mem_type);
	man = &bdev->man[mem_type];
	if (!man->has_type) {
		kprintf("[TTM] Trying to take down uninitialized memory manager type %u\n",
	man->use_type = false;
	man->has_type = false;
	ttm_bo_force_list_clean(bdev, mem_type, false);
	ret = (*man->func->takedown)(man);
EXPORT_SYMBOL(ttm_bo_clean_mm);
int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		kprintf("[TTM] Illegal memory manager memory type %u\n", mem_type);
	if (!man->has_type) {
		kprintf("[TTM] Memory type %u has not been initialized\n", mem_type);
	return ttm_bo_force_list_clean(bdev, mem_type, true);
EXPORT_SYMBOL(ttm_bo_evict_mm);
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
	struct ttm_mem_type_manager *man;
	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	lockinit(&man->io_reserve_mutex, "ttmman", 0, LK_CANRECURSE);
	INIT_LIST_HEAD(&man->io_reserve_lru);
	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
	man->has_type = true;
	man->use_type = true;
	INIT_LIST_HEAD(&man->lru);
EXPORT_SYMBOL(ttm_bo_init_mm);
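/*
 * Illustrative sketch (added): after ttm_bo_device_init() a driver brings up
 * its own memory domains with ttm_bo_init_mm(), passing the managed size in
 * pages; the specifics of the type are filled in by the driver's
 * init_mem_type() hook.  The identifiers below are hypothetical driver
 * values:
 *
 *	ret = ttm_bo_init_mm(bdev, MY_DRIVER_PL_VRAM,
 *	    vram_size >> PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *
 * TTM_PL_SYSTEM itself is initialized with p_size == 0 because it is not
 * backed by a range manager (see the TTM_PL_SYSTEM special case above).
 */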
static void ttm_bo_global_kobj_release(struct ttm_bo_global *glob)
	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	vm_page_free_contig(glob->dummy_read_page, PAGE_SIZE);
	glob->dummy_read_page = NULL;
	vm_page_free(glob->dummy_read_page);
void ttm_bo_global_release(struct drm_global_reference *ref)
	struct ttm_bo_global *glob = ref->object;
	if (refcount_release(&glob->kobj_ref))
		ttm_bo_global_kobj_release(glob);
EXPORT_SYMBOL(ttm_bo_global_release);
int ttm_bo_global_init(struct drm_global_reference *ref)
	struct ttm_bo_global_ref *bo_ref =
	    container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	lockinit(&glob->device_list_mutex, "ttmdlm", 0, LK_CANRECURSE);
	lockinit(&glob->lru_lock, "ttmlru", 0, LK_CANRECURSE);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = vm_page_alloc_contig(
	    0, VM_MAX_ADDRESS, PAGE_SIZE, 0, 1*PAGE_SIZE, VM_MEMATTR_UNCACHEABLE);
	if (unlikely(glob->dummy_read_page == NULL)) {
	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);
	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		kprintf("[TTM] Could not register buffer object swapout\n");
	atomic_set(&glob->bo_count, 0);
	refcount_init(&glob->kobj_ref, 1);
	vm_page_free_contig(glob->dummy_read_page, PAGE_SIZE);
	glob->dummy_read_page = NULL;
	vm_page_free(glob->dummy_read_page);
	kfree(glob, M_DRM_GLOBAL);
EXPORT_SYMBOL(ttm_bo_global_init);
int ttm_bo_device_release(struct ttm_bo_device *bdev)
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				kprintf("[TTM] DRM memory manager type %d is not clean\n",
			man->has_type = false;
	lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
	list_del(&bdev->device_list);
	lockmgr(&glob->device_list_mutex, LK_RELEASE);
	cancel_delayed_work_sync(&bdev->wq);
	while (ttm_bo_delayed_delete(bdev, true))
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");
	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	lockmgr(&glob->lru_lock, LK_RELEASE);
	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	drm_mm_takedown(&bdev->addr_space_mm);
	lockmgr(&bdev->vm_lock, LK_RELEASE);
EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
	lockinit(&bdev->vm_lock, "ttmvml", 0, LK_CANRECURSE);
	bdev->driver = driver;
	memset(bdev->man, 0, sizeof(bdev->man));
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
	RB_INIT(&bdev->addr_space_rb);
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;
	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->need_dma32 = need_dma32;
	lockinit(&bdev->fence_lock, "ttmfence", 0, LK_CANRECURSE);
	lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE);
	list_add_tail(&bdev->device_list, &glob->device_list);
	lockmgr(&glob->device_list_mutex, LK_RELEASE);
	ttm_bo_clean_mm(bdev, 0);
EXPORT_SYMBOL(ttm_bo_device_init);
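/*
 * Illustrative sketch (added): typical bring-up order in a driver is to take
 * a reference on the TTM global state and then initialize the device, e.g.
 * (error handling omitted; 'DRM_FILE_PAGE_OFFSET' is the driver's mmap
 * offset base and the other names are hypothetical):
 *
 *	ret = ttm_bo_device_init(&priv->bdev, priv->bo_global_ref.ref.object,
 *	    &my_bo_driver, DRM_FILE_PAGE_OFFSET, need_dma32);
 *
 * ttm_bo_device_init() already sets up TTM_PL_SYSTEM; additional memory
 * types are added afterwards with ttm_bo_init_mm() as shown above.
 */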
 * buffer object vm functions.
bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
		if (mem->placement & TTM_PL_FLAG_CACHED)
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
	ttm_bo_release_mmap(bo);
	ttm_mem_io_free_vm(bo);
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
	struct ttm_bo_device *bdev = bo->bdev;
	/* The caller acquired bdev->vm_lock. */
	RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
 * @bo: the buffer to allocate address space for
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
	struct ttm_bo_device *bdev = bo->bdev;
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
	    bo->mem.num_pages, 0, 0);
	if (unlikely(bo->vm_node == NULL)) {
	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
	    bo->mem.num_pages, 0);
	if (unlikely(bo->vm_node == NULL)) {
		lockmgr(&bdev->vm_lock, LK_RELEASE);
	ttm_bo_vm_insert_rb(bo);
	lockmgr(&bdev->vm_lock, LK_RELEASE);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
	lockmgr(&bdev->vm_lock, LK_RELEASE);
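/*
 * Note (added): the addr_space_offset computed above is the byte offset a
 * driver typically hands back to userspace (e.g. from a "map" ioctl) so that
 * a subsequent mmap() of the DRM device file at that offset faults in this
 * buffer object's pages.
 */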
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	if (likely(bo->sync_obj == NULL))
	while (bo->sync_obj) {
		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&tmp_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		ret = driver->sync_obj_wait(sync_obj,
		    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
			lockmgr(&bdev->fence_lock, LK_RELEASE);
			driver->sync_obj_unref(&sync_obj);
			lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
EXPORT_SYMBOL(ttm_bo_wait);
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
	struct ttm_bo_device *bdev = bo->bdev;
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
	atomic_dec(&bo->cpu_writers);
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
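/*
 * Illustrative usage (added): CPU access to a buffer that the GPU may still
 * be using is bracketed by the two helpers above, e.g.:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (ret == 0) {
 *		// ... map and write the buffer contents on the CPU ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 *
 * The grab waits for the buffer to idle and bumps cpu_writers, which other
 * paths can consult before handing the buffer back to the GPU.
 */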
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	list_for_each_entry(bo, &glob->swap_lru, swap) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
		lockmgr(&glob->lru_lock, LK_RELEASE);
	kref_get(&bo->list_kref);
	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
		kref_put(&bo->list_kref, ttm_bo_release_list);
	put_count = ttm_bo_del_from_lru(bo);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_bo_list_ref_sub(bo, put_count, true);
	 * Wait for GPU, then move to system cached.
	lockmgr(&bo->bdev->fence_lock, LK_EXCLUSIVE);
	ret = ttm_bo_wait(bo, false, false, false);
	lockmgr(&bo->bdev->fence_lock, LK_RELEASE);
	if (unlikely(ret != 0))
	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;
		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;
		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
		if (unlikely(ret != 0))
	ttm_bo_unmap_virtual(bo);
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);
	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
EXPORT_SYMBOL(ttm_bo_swapout_all);