drm/ttm: convert to the reservation api
authorFrançois Tigeot <ftigeot@wolfpond.org>
Mon, 11 Mar 2019 22:00:40 +0000 (23:00 +0100)
committerFrançois Tigeot <ftigeot@wolfpond.org>
Mon, 11 Mar 2019 22:00:40 +0000 (23:00 +0100)
Additional-bugfix-by: dillon
sys/dev/drm/include/drm/ttm/ttm_bo_api.h
sys/dev/drm/include/drm/ttm/ttm_bo_driver.h
sys/dev/drm/include/drm/ttm/ttm_execbuf_util.h
sys/dev/drm/ttm/ttm_bo.c
sys/dev/drm/ttm/ttm_bo_util.c
sys/dev/drm/ttm/ttm_execbuf_util.c

index aae6dd5..0576ca2 100644 (file)
@@ -220,8 +220,6 @@ struct ttm_buffer_object {
        struct kref kref;
        struct kref list_kref;
 
-       wait_queue_head_t event_queue;
-
        /**
         * Members protected by the bo::reserved lock.
         */
@@ -245,15 +243,6 @@ struct ttm_buffer_object {
        struct list_head ddestroy;
        struct list_head swap;
        struct list_head io_reserve_lru;
-       unsigned long val_seq;
-       bool seq_valid;
-
-       /**
-        * Members protected by the bdev::lru_lock
-        * only when written to.
-        */
-
-       atomic_t reserved;
 
        /**
         * Members protected by struct buffer_object_device::fence_lock
@@ -283,6 +272,9 @@ struct ttm_buffer_object {
        uint32_t cur_placement;
 
        struct sg_table *sg;
+
+       struct reservation_object *resv;
+       struct reservation_object ttm_resv;
 };
 
 /**
@@ -733,7 +725,7 @@ extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
  */
 static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
 {
-       return atomic_read(&bo->reserved);
+       return ww_mutex_is_locked(&bo->resv->lock);
 }
 
 #endif
index 186fffc..5e3f836 100644 (file)
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_memory.h>
 #include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_placement.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_global.h>
-#include <sys/tree.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/spinlock.h>
 #include <linux/reservation.h>
 
+#include <sys/tree.h>
+
 /* XXX nasty hack, but does the job */
 #undef RB_ROOT
 #define        RB_ROOT(head)   (head)->rbh_root
index d84087a..a7d1937 100644 (file)
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/* $FreeBSD: head/sys/dev/drm2/ttm/ttm_execbuf_util.h 247835 2013-03-05 09:49:34Z kib $ */
 
 #ifndef _TTM_EXECBUF_UTIL_H_
 #define _TTM_EXECBUF_UTIL_H_
 
 #include <drm/ttm/ttm_bo_api.h>
-#include <linux/reservation.h>
+#include <linux/list.h>
 
 /**
  * struct ttm_validate_buffer
index 64008d8..7c42f2c 100644 (file)
@@ -150,6 +150,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
+       if (bo->resv == &bo->ttm_resv)
+               reservation_object_fini(&bo->ttm_resv);
+
        if (bo->destroy)
                bo->destroy(bo);
        else {
@@ -158,18 +161,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
        ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
-static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
-                                 bool interruptible)
-{
-       if (interruptible) {
-               return wait_event_interruptible(bo->event_queue,
-                                              !ttm_bo_is_reserved(bo));
-       } else {
-               wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
-               return 0;
-       }
-}
-
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -218,65 +209,27 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
                          bool no_wait, bool use_ticket,
                          struct ww_acquire_ctx *ticket)
 {
-       int ret;
+       int ret = 0;
 
-       while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-               /**
-                * Deadlock avoidance for multi-bo reserving.
-                */
-               if (use_ticket && bo->seq_valid) {
-                       /**
-                        * We've already reserved this one.
-                        */
-                       if (unlikely(ticket->stamp == bo->val_seq))
-                               return -EDEADLK;
-                       /**
-                        * Already reserved by a thread that will not back
-                        * off for us. We need to back off.
-                        */
-                       if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX))
-                               return -EAGAIN;
-               }
+       if (no_wait) {
+               bool success;
 
-               if (no_wait)
+               /* not valid any more, fix your locking! */
+               if (WARN_ON(ticket))
                        return -EBUSY;
 
-               ret = ttm_bo_wait_unreserved(bo, interruptible);
-
-               if (unlikely(ret))
-                       return ret;
+               success = ww_mutex_trylock(&bo->resv->lock);
+               return success ? 0 : -EBUSY;
        }
 
-       if (use_ticket) {
-               bool wake_up = false;
-
-               /**
-                * Wake up waiters that may need to recheck for deadlock,
-                * if we decreased the sequence number.
-                */
-               if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX)
-                            || !bo->seq_valid))
-                       wake_up = true;
-
-               /*
-                * In the worst case with memory ordering these values can be
-                * seen in the wrong order. However since we call wake_up_all
-                * in that case, this will hopefully not pose a problem,
-                * and the worst case would only cause someone to accidentally
-                * hit -EAGAIN in ttm_bo_reserve when they see old value of
-                * val_seq. However this would only happen if seq_valid was
-                * written before val_seq was, and just means some slightly
-                * increased cpu usage
-                */
-               bo->val_seq = ticket->stamp;
-               bo->seq_valid = true;
-               if (wake_up)
-                       wake_up_all(&bo->event_queue);
-       } else {
-               bo->seq_valid = false;
-       }
-
-       return 0;
+       if (interruptible)
+               ret = ww_mutex_lock_interruptible(&bo->resv->lock,
+                                                 ticket);
+       else
+               ret = ww_mutex_lock(&bo->resv->lock, ticket);
+       if (ret == -EINTR)
+               return -ERESTARTSYS;
+       return ret;
 }
 EXPORT_SYMBOL(ttm_bo_reserve);
 
@@ -313,72 +266,35 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
        return ret;
 }
 
-int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
-                                 bool interruptible,
-                                 struct ww_acquire_ctx *ticket)
-{
-       bool wake_up = false;
-       int ret;
-
-       while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
-               WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq);
-
-               ret = ttm_bo_wait_unreserved(bo, interruptible);
-
-               if (unlikely(ret))
-                       return ret;
-       }
-
-       if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid)
-               wake_up = true;
-
-       /**
-        * Wake up waiters that may need to recheck for deadlock,
-        * if we decreased the sequence number.
-        */
-       bo->val_seq = ticket->stamp;
-       bo->seq_valid = true;
-       if (wake_up)
-               wake_up_all(&bo->event_queue);
-
-       return 0;
-}
-
 int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
                            bool interruptible, struct ww_acquire_ctx *ticket)
 {
        struct ttm_bo_global *glob = bo->glob;
-       int put_count, ret;
+       int put_count = 0;
+       int ret = 0;
 
-       ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket);
-       if (likely(!ret)) {
+       if (interruptible)
+               ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+                                                      ticket);
+       else
+               ww_mutex_lock_slow(&bo->resv->lock, ticket);
+
+       if (likely(ret == 0)) {
                lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
                put_count = ttm_bo_del_from_lru(bo);
                lockmgr(&glob->lru_lock, LK_RELEASE);
                ttm_bo_list_ref_sub(bo, put_count, true);
-       }
+       } else if (ret == -EINTR) {
+               ret = -ERESTARTSYS;
+       }
        return ret;
 }
 EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
 
-/*
- * Must interlock with event_queue to avoid race against
- * wait_event_common() which can cause wait_event_common()
- * to become stuck.
- */
-static void
-ttm_bo_unreserve_core(struct ttm_buffer_object *bo)
-{
-       lockmgr(&bo->event_queue.lock, LK_EXCLUSIVE);
-       atomic_set(&bo->reserved, 0);
-       lockmgr(&bo->event_queue.lock, LK_RELEASE);
-       wake_up_all(&bo->event_queue);
-}
-
 void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
 {
        ttm_bo_add_to_lru(bo);
-       ttm_bo_unreserve_core(bo);
+       ww_mutex_unlock(&bo->resv->lock);
 }
 
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
@@ -570,16 +486,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
                bo->ttm = NULL;
        }
        ttm_bo_mem_put(bo, &bo->mem);
-       ttm_bo_unreserve_core(bo);
 
-       /*
-        * Since the final reference to this bo may not be dropped by
-        * the current task we have to put a memory barrier here to make
-        * sure the changes done in this function are always visible.
-        *
-        * This function only needs protection against the final kref_put.
-        */
-       cpu_mfence();
+       ww_mutex_unlock(&bo->resv->lock);
 }
 
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
@@ -623,7 +531,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                        ttm_bo_add_to_lru(bo);
                }
 
-               ttm_bo_unreserve_core(bo);
+               if (!ret)
+                       ww_mutex_unlock(&bo->resv->lock);
        }
 
        kref_get(&bo->list_kref);
@@ -674,7 +583,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
 
-               ttm_bo_unreserve_core(bo);
+               ww_mutex_unlock(&bo->resv->lock);
                lockmgr(&glob->lru_lock, LK_RELEASE);
 
                ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -712,7 +621,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
                lockmgr(&bdev->fence_lock, LK_RELEASE);
 
        if (ret || unlikely(list_empty(&bo->ddestroy))) {
-               ttm_bo_unreserve_core(bo);
+               ww_mutex_unlock(&bo->resv->lock);
                lockmgr(&glob->lru_lock, LK_RELEASE);
                return ret;
        }
@@ -1293,6 +1202,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        int ret = 0;
        unsigned long num_pages;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+       bool locked;
 
        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (ret) {
@@ -1319,8 +1229,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
-       atomic_set(&bo->reserved, 1);
-       init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
@@ -1338,10 +1246,11 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        bo->mem.bus.io_reserved_count = 0;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
-       bo->seq_valid = false;
        bo->persistent_swap_storage = persistent_swap_storage;
        bo->acc_size = acc_size;
        bo->sg = sg;
+       bo->resv = &bo->ttm_resv;
+       reservation_object_init(bo->resv);
        atomic_inc(&bo->glob->bo_count);
 
        /*
@@ -1360,16 +1269,17 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
                        goto out_err;
        }
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false);
-       if (ret)
-               goto out_err;
+       locked = ww_mutex_trylock(&bo->resv->lock);
+       WARN_ON(!locked);
 
-       ttm_bo_unreserve(bo);
-       return 0;
+       if (likely(!ret))
+               ret = ttm_bo_validate(bo, placement, interruptible, false);
 
 out_err:
        ttm_bo_unreserve(bo);
-       ttm_bo_unref(&bo);
+
+       if (unlikely(ret))
+               ttm_bo_unref(&bo);
 
        return ret;
 }
@@ -1957,7 +1867,7 @@ out:
         * already swapped buffer.
         */
 
-       ttm_bo_unreserve_core(bo);
+       ww_mutex_unlock(&bo->resv->lock);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
 }
index 5bc7d19..12db784 100644 (file)
@@ -453,6 +453,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
+       int ret;
 
        fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK);
        if (!fbo)
@@ -465,7 +466,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
         * TODO: Explicit member copy would probably be better here.
         */
 
-       init_waitqueue_head(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
@@ -483,6 +483,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;
+       fbo->resv = &fbo->ttm_resv;
+       reservation_object_init(fbo->resv);
+       ret = ww_mutex_trylock(&fbo->resv->lock);
+       WARN_ON(!ret);
 
         /*
         * Mirror ref from kref_init() for list_kref.
index f0dafa0..b5cb11f 100644 (file)
@@ -48,8 +48,7 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list,
                        entry->removed = false;
 
                } else {
-                       atomic_set(&bo->reserved, 0);
-                       wake_up_all(&bo->event_queue);
+                       ww_mutex_unlock(&bo->resv->lock);
                }
        }
 }
@@ -134,7 +133,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
        glob = entry->bo->glob;
 
        ww_acquire_init(ticket, &reservation_ww_class);
-       lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
 
 retry:
        list_for_each_entry(entry, list, head) {
@@ -144,42 +142,34 @@ retry:
                if (entry->reserved)
                        continue;
 
-               ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket);
-               switch (ret) {
-               case 0:
-                       break;
-               case -EBUSY:
-                       ttm_eu_del_from_lru_locked(list);
-                       lockmgr(&glob->lru_lock, LK_RELEASE);
-                       ret = ttm_bo_reserve_nolru(bo, true, false,
-                                                  true, ticket);
-                       lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
-
-                       if (!ret)
-                               break;
+               ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
 
-                       if (unlikely(ret != -EAGAIN))
-                               goto err;
 
-                       /* fallthrough */
-               case -EAGAIN:
+               if (ret == -EDEADLK) {
+                       /* uh oh, we lost out, drop every reservation and try
+                        * to only reserve this buffer, then start over if
+                        * this succeeds.
+                        */
+                       lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
                        ttm_eu_backoff_reservation_locked(list, ticket);
                        lockmgr(&glob->lru_lock, LK_RELEASE);
                        ttm_eu_list_ref_sub(list);
-                       ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket);
-                       if (unlikely(ret != 0))
+                       ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+                                                              ticket);
+                       if (unlikely(ret != 0)) {
+                               if (ret == -EINTR)
+                                       ret = -ERESTARTSYS;
                                goto err_fini;
+                       }
 
-                       lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
                        entry->reserved = true;
                        if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                                ret = -EBUSY;
                                goto err;
                        }
                        goto retry;
-               default:
+               } else if (ret)
                        goto err;
-               }
 
                entry->reserved = true;
                if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
@@ -189,12 +179,14 @@ retry:
        }
 
        ww_acquire_done(ticket);
+       lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
        ttm_eu_del_from_lru_locked(list);
        lockmgr(&glob->lru_lock, LK_RELEASE);
        ttm_eu_list_ref_sub(list);
        return 0;
 
 err:
+       lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
        ttm_eu_backoff_reservation_locked(list, ticket);
        lockmgr(&glob->lru_lock, LK_RELEASE);
        ttm_eu_list_ref_sub(list);