From 5fd16c59eed4b0560ae53e41715827f3a1309036 Mon Sep 17 00:00:00 2001 From: Matthew Dillon Date: Thu, 15 Jan 2015 12:59:51 -0800 Subject: [PATCH] drm - Fix a second X lockup w/radeon ttm * Code which releases bo->reserved and wakes up waiters was not interlocked against wait_event_common() used to wait for the release. This can result in a race where the release occurs in between the wait's test and its sleep, preventing the wait from ever waking up. * This is different from the dev_pager_mtx deadlock which was recently fixed, but in the same code path. * Roll the release of bo->reserved into a static function and throw the bo->event_queue.lock around the actual release to fix the race. --- sys/dev/drm/ttm/ttm_bo.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/sys/dev/drm/ttm/ttm_bo.c b/sys/dev/drm/ttm/ttm_bo.c index dc44963954..66adfc078b 100644 --- a/sys/dev/drm/ttm/ttm_bo.c +++ b/sys/dev/drm/ttm/ttm_bo.c @@ -332,13 +332,26 @@ int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_reserve_slowpath); -void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) +/* + * Must interlock with event_queue to avoid race against + * wait_event_common() which can cause wait_event_common() + * to become stuck. 
+ */ +static void +ttm_bo_unreserve_core(struct ttm_buffer_object *bo) { - ttm_bo_add_to_lru(bo); + lockmgr(&bo->event_queue.lock, LK_EXCLUSIVE); atomic_set(&bo->reserved, 0); + lockmgr(&bo->event_queue.lock, LK_RELEASE); wake_up_all(&bo->event_queue); } +void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) +{ + ttm_bo_add_to_lru(bo); + ttm_bo_unreserve_core(bo); +} + void ttm_bo_unreserve(struct ttm_buffer_object *bo) { struct ttm_bo_global *glob = bo->glob; @@ -518,9 +531,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) bo->ttm = NULL; } ttm_bo_mem_put(bo, &bo->mem); - - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ttm_bo_unreserve_core(bo); /* * Since the final reference to this bo may not be dropped by @@ -562,8 +573,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) lockmgr(&bdev->fence_lock, LK_RELEASE); if (!ret) { - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ttm_bo_unreserve_core(bo); } kref_get(&bo->list_kref); @@ -614,8 +624,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, sync_obj = driver->sync_obj_ref(bo->sync_obj); lockmgr(&bdev->fence_lock, LK_RELEASE); - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ttm_bo_unreserve_core(bo); lockmgr(&glob->lru_lock, LK_RELEASE); ret = driver->sync_obj_wait(sync_obj, false, interruptible); @@ -653,8 +662,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, lockmgr(&bdev->fence_lock, LK_RELEASE); if (ret || unlikely(list_empty(&bo->ddestroy))) { - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ttm_bo_unreserve_core(bo); lockmgr(&glob->lru_lock, LK_RELEASE); return ret; } @@ -1917,8 +1925,7 @@ out: * already swapped buffer. */ - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ttm_bo_unreserve_core(bo); kref_put(&bo->list_kref, ttm_bo_release_list); return ret; } -- 2.41.0