From 1716017eecf02e0587b5f5ccf355c868f43a54ac Mon Sep 17 00:00:00 2001
From: François Tigeot
Date: Mon, 5 Aug 2013 21:12:10 +0200
Subject: [PATCH] ttm porting: Translate locking primitives

* rw locks to lockmgr locks
* sx locks to lockmgr locks
* Some mtx locks to lockmgr locks
* Some mtx locks to spinlocks

Some more locks could probably be converted to spinlocks, but doing so
can be dangerous. It is safer to use lockmgr locks first and make sure
the code runs without any issues before trying to optimize.
---
Note: for reviewers new to lockmgr, a short sketch of the translation
patterns used throughout this patch follows the diff.

 sys/dev/drm2/ttm/ttm_bo.c           | 140 ++++++++++++++--------------
 sys/dev/drm2/ttm/ttm_bo_driver.h    |  10 +-
 sys/dev/drm2/ttm/ttm_bo_manager.c   |  26 +++---
 sys/dev/drm2/ttm/ttm_bo_util.c      |  13 +--
 sys/dev/drm2/ttm/ttm_bo_vm.c        |  10 +-
 sys/dev/drm2/ttm/ttm_execbuf_util.c |  24 ++---
 sys/dev/drm2/ttm/ttm_lock.c         |  38 ++++----
 sys/dev/drm2/ttm/ttm_lock.h         |   2 +-
 sys/dev/drm2/ttm/ttm_memory.c       |  23 +++--
 sys/dev/drm2/ttm/ttm_memory.h       |  12 +--
 sys/dev/drm2/ttm/ttm_object.c       |  60 ++++++------
 sys/dev/drm2/ttm/ttm_page_alloc.c   |  22 ++---
 12 files changed, 190 insertions(+), 190 deletions(-)

diff --git a/sys/dev/drm2/ttm/ttm_bo.c b/sys/dev/drm2/ttm/ttm_bo.c index 28d4aa43e9..f4256091b4 100644 --- a/sys/dev/drm2/ttm/ttm_bo.c +++ b/sys/dev/drm2/ttm/ttm_bo.c @@ -267,12 +267,12 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, int put_count = 0; int ret; - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence, sequence); if (likely(ret == 0)) put_count = ttm_bo_del_from_lru(bo); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); ttm_bo_list_ref_sub(bo, put_count, true); @@ -290,9 +290,9 @@ void ttm_bo_unreserve(struct ttm_buffer_object *bo) { struct ttm_bo_global *glob = bo->glob; - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); ttm_bo_unreserve_locked(bo); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); } /* @@ -486,16 +486,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) int put_count; int ret; - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); ret = ttm_bo_reserve_locked(bo, false, true, false, 0); - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); (void) ttm_bo_wait(bo, false, false, true); if (!ret && !bo->sync_obj) { - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); put_count = ttm_bo_del_from_lru(bo); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); ttm_bo_cleanup_memtype_use(bo); ttm_bo_list_ref_sub(bo, put_count, true); @@ -504,7 +504,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) } if (bo->sync_obj) sync_obj = driver->sync_obj_ref(bo->sync_obj); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); if (!ret) { atomic_set(&bo->reserved, 0); @@ -513,7 +513,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) refcount_acquire(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); if (sync_obj) { driver->sync_obj_flush(sync_obj); @@ -545,7 +545,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, int put_count; int ret; - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); ret = ttm_bo_wait(bo, false, false, true); if (ret && !no_wait_gpu) { @@ -557,11 +557,11 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, * no
new sync objects can be attached. */ sync_obj = driver->sync_obj_ref(bo->sync_obj); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); atomic_set(&bo->reserved, 0); wakeup(bo); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); ret = driver->sync_obj_wait(sync_obj, false, interruptible); driver->sync_obj_unref(&sync_obj); @@ -572,13 +572,13 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, * remove sync_obj with ttm_bo_wait, the wait should be * finished, and no new wait object should have been added. */ - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); ret = ttm_bo_wait(bo, false, false, true); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); if (ret) return ret; - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); ret = ttm_bo_reserve_locked(bo, false, true, false, 0); /* @@ -590,16 +590,16 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, * here. */ if (ret) { - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); return 0; } } else - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); if (ret || unlikely(list_empty(&bo->ddestroy))) { atomic_set(&bo->reserved, 0); wakeup(bo); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); return ret; } @@ -607,7 +607,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, list_del_init(&bo->ddestroy); ++put_count; - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); ttm_bo_cleanup_memtype_use(bo); ttm_bo_list_ref_sub(bo, put_count, true); @@ -626,7 +626,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) struct ttm_buffer_object *entry = NULL; int ret = 0; - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); if (list_empty(&bdev->ddestroy)) goto out_unlock; @@ -648,7 +648,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) ret = ttm_bo_cleanup_refs_and_unlock(entry, false, !remove_all); else - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); if (refcount_release(&entry->list_kref)) ttm_bo_release_list(entry); @@ -657,13 +657,13 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) if (ret || !entry) goto out; - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); if (list_empty(&entry->ddestroy)) break; } out_unlock: - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); out: if (entry && refcount_release(&entry->list_kref)) ttm_bo_release_list(entry); @@ -685,14 +685,14 @@ static void ttm_bo_release(struct ttm_buffer_object *bo) struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; - rw_wlock(&bdev->vm_lock); + lockmgr(&bdev->vm_lock, LK_EXCLUSIVE); if (likely(bo->vm_node != NULL)) { RB_REMOVE(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo); drm_mm_put_block(bo->vm_node); bo->vm_node = NULL; } - rw_wunlock(&bdev->vm_lock); + lockmgr(&bdev->vm_lock, LK_RELEASE); ttm_mem_io_lock(man, false); ttm_mem_io_free_vm(bo); ttm_mem_io_unlock(man); @@ -736,9 +736,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, struct ttm_placement placement; int ret = 0; - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); if 
(unlikely(ret != 0)) { if (ret != -ERESTART) { @@ -793,7 +793,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo; int ret = -EBUSY, put_count; - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); list_for_each_entry(bo, &man->lru, lru) { ret = ttm_bo_reserve_locked(bo, false, true, false, 0); if (!ret) @@ -801,7 +801,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, } if (ret) { - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); return ret; } @@ -816,7 +816,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, } put_count = ttm_bo_del_from_lru(bo); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); KKASSERT(ret == 0); @@ -1045,9 +1045,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, * Have the driver move function wait for idle when necessary, * instead of doing it here. */ - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); if (ret) return ret; mem.num_pages = bo->num_pages; @@ -1296,9 +1296,9 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, * Can't use standard list traversal since we're unlocking. */ - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); while (!list_empty(&man->lru)) { - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); ret = ttm_mem_evict_first(bdev, mem_type, false, false); if (ret) { if (allow_errors) { @@ -1307,9 +1307,9 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, kprintf("[TTM] Cleanup eviction failed\n"); } } - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); } - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); return 0; } @@ -1371,7 +1371,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, KKASSERT(!man->has_type); man->io_reserve_fastpath = true; man->use_io_reserve_lru = false; - sx_init(&man->io_reserve_mutex, "ttmman"); + lockinit(&man->io_reserve_mutex, "ttmman", 0, LK_CANRECURSE); INIT_LIST_HEAD(&man->io_reserve_lru); ret = bdev->driver->init_mem_type(bdev, type, man); @@ -1416,8 +1416,8 @@ int ttm_bo_global_init(struct drm_global_reference *ref) struct ttm_bo_global *glob = ref->object; int ret; - sx_init(&glob->device_list_mutex, "ttmdlm"); - mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF); + lockinit(&glob->device_list_mutex, "ttmdlm", 0, LK_CANRECURSE); + lockinit(&glob->lru_lock, "ttmlru", 0, LK_CANRECURSE); glob->mem_glob = bo_ref->mem_glob; glob->dummy_read_page = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ, @@ -1470,9 +1470,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) } } - sx_xlock(&glob->device_list_mutex); + lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE); list_del(&bdev->device_list); - sx_xunlock(&glob->device_list_mutex); + lockmgr(&glob->device_list_mutex, LK_RELEASE); if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, NULL)) taskqueue_drain_timeout(taskqueue_thread, &bdev->wq); @@ -1480,18 +1480,18 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) while (ttm_bo_delayed_delete(bdev, true)) ; - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); if (list_empty(&bdev->ddestroy)) TTM_DEBUG("Delayed destroy list was clean\n"); if (list_empty(&bdev->man[0].lru)) TTM_DEBUG("Swap list was clean\n"); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); 
KKASSERT(drm_mm_clean(&bdev->addr_space_mm)); - rw_wlock(&bdev->vm_lock); + lockmgr(&bdev->vm_lock, LK_EXCLUSIVE); drm_mm_takedown(&bdev->addr_space_mm); - rw_wunlock(&bdev->vm_lock); + lockmgr(&bdev->vm_lock, LK_RELEASE); return ret; } @@ -1504,7 +1504,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, { int ret = -EINVAL; - rw_init(&bdev->vm_lock, "ttmvml"); + lockinit(&bdev->vm_lock, "ttmvml", 0, LK_CANRECURSE); bdev->driver = driver; memset(bdev->man, 0, sizeof(bdev->man)); @@ -1529,10 +1529,10 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, bdev->glob = glob; bdev->need_dma32 = need_dma32; bdev->val_seq = 0; - mtx_init(&bdev->fence_lock, "ttmfence", NULL, MTX_DEF); - sx_xlock(&glob->device_list_mutex); + lockinit(&bdev->fence_lock, "ttmfence", 0, LK_CANRECURSE); + lockmgr(&glob->device_list_mutex, LK_EXCLUSIVE); list_add_tail(&bdev->device_list, &glob->device_list); - sx_xunlock(&glob->device_list_mutex); + lockmgr(&glob->device_list_mutex, LK_RELEASE); return 0; out_no_addr_mm: @@ -1613,7 +1613,7 @@ retry_pre_get: if (unlikely(ret != 0)) return ret; - rw_wlock(&bdev->vm_lock); + lockmgr(&bdev->vm_lock, LK_EXCLUSIVE); bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm, bo->mem.num_pages, 0, 0); @@ -1626,17 +1626,17 @@ retry_pre_get: bo->mem.num_pages, 0); if (unlikely(bo->vm_node == NULL)) { - rw_wunlock(&bdev->vm_lock); + lockmgr(&bdev->vm_lock, LK_RELEASE); goto retry_pre_get; } ttm_bo_vm_insert_rb(bo); - rw_wunlock(&bdev->vm_lock); + lockmgr(&bdev->vm_lock, LK_RELEASE); bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT; return 0; out_unlock: - rw_wunlock(&bdev->vm_lock); + lockmgr(&bdev->vm_lock, LK_RELEASE); return ret; } @@ -1657,9 +1657,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); driver->sync_obj_unref(&tmp_obj); - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); continue; } @@ -1667,28 +1667,28 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, return -EBUSY; sync_obj = driver->sync_obj_ref(bo->sync_obj); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); ret = driver->sync_obj_wait(sync_obj, lazy, interruptible); if (unlikely(ret != 0)) { driver->sync_obj_unref(&sync_obj); - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); return ret; } - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); if (likely(bo->sync_obj == sync_obj)) { void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); driver->sync_obj_unref(&sync_obj); driver->sync_obj_unref(&tmp_obj); - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); } else { - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); driver->sync_obj_unref(&sync_obj); - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); } } return 0; @@ -1706,9 +1706,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) ret = ttm_bo_reserve(bo, true, no_wait, false, 0); if (unlikely(ret != 0)) return ret; - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); ret = ttm_bo_wait(bo, false, true, no_wait); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); if (likely(ret == 0)) atomic_inc(&bo->cpu_writers); 
ttm_bo_unreserve(bo); @@ -1734,7 +1734,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) int put_count; uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); list_for_each_entry(bo, &glob->swap_lru, swap) { ret = ttm_bo_reserve_locked(bo, false, true, false, 0); if (!ret) @@ -1742,7 +1742,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) } if (ret) { - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); return ret; } @@ -1756,7 +1756,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) } put_count = ttm_bo_del_from_lru(bo); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); ttm_bo_list_ref_sub(bo, put_count, true); @@ -1764,9 +1764,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) * Wait for GPU, then move to system cached. */ - mtx_lock(&bo->bdev->fence_lock); + lockmgr(&bo->bdev->fence_lock, LK_EXCLUSIVE); ret = ttm_bo_wait(bo, false, false, false); - mtx_unlock(&bo->bdev->fence_lock); + lockmgr(&bo->bdev->fence_lock, LK_RELEASE); if (unlikely(ret != 0)) goto out; diff --git a/sys/dev/drm2/ttm/ttm_bo_driver.h b/sys/dev/drm2/ttm/ttm_bo_driver.h index 16bf47e42f..2660120e1f 100644 --- a/sys/dev/drm2/ttm/ttm_bo_driver.h +++ b/sys/dev/drm2/ttm/ttm_bo_driver.h @@ -277,7 +277,7 @@ struct ttm_mem_type_manager { uint32_t default_caching; const struct ttm_mem_type_manager_func *func; void *priv; - struct sx io_reserve_mutex; + struct lock io_reserve_mutex; bool use_io_reserve_lru; bool io_reserve_fastpath; @@ -485,8 +485,8 @@ struct ttm_bo_global { struct ttm_mem_global *mem_glob; struct vm_page *dummy_read_page; struct ttm_mem_shrink shrink; - struct sx device_list_mutex; - struct mtx lru_lock; + struct lock device_list_mutex; + struct lock lru_lock; /** * Protected by device_list_mutex. @@ -535,9 +535,9 @@ struct ttm_bo_device { struct list_head device_list; struct ttm_bo_global *glob; struct ttm_bo_driver *driver; - struct rwlock vm_lock; + struct lock vm_lock; struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; - struct mtx fence_lock; + struct lock fence_lock; /* * Protected by the vm lock. 
*/ diff --git a/sys/dev/drm2/ttm/ttm_bo_manager.c b/sys/dev/drm2/ttm/ttm_bo_manager.c index ece1bbbc6d..9aaa80b423 100644 --- a/sys/dev/drm2/ttm/ttm_bo_manager.c +++ b/sys/dev/drm2/ttm/ttm_bo_manager.c @@ -44,7 +44,7 @@ struct ttm_range_manager { struct drm_mm mm; - struct mtx lock; + struct lock lock; }; MALLOC_DEFINE(M_TTM_RMAN, "ttm_rman", "TTM Range Manager"); @@ -68,19 +68,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, if (unlikely(ret)) return ret; - mtx_lock(&rman->lock); + lockmgr(&rman->lock, LK_EXCLUSIVE); node = drm_mm_search_free_in_range(mm, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn, 1); if (unlikely(node == NULL)) { - mtx_unlock(&rman->lock); + lockmgr(&rman->lock, LK_RELEASE); return 0; } node = drm_mm_get_block_atomic_range(node, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn); - mtx_unlock(&rman->lock); + lockmgr(&rman->lock, LK_RELEASE); } while (node == NULL); mem->mm_node = node; @@ -94,9 +94,9 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; if (mem->mm_node) { - mtx_lock(&rman->lock); + lockmgr(&rman->lock, LK_EXCLUSIVE); drm_mm_put_block(mem->mm_node); - mtx_unlock(&rman->lock); + lockmgr(&rman->lock, LK_RELEASE); mem->mm_node = NULL; } } @@ -114,7 +114,7 @@ static int ttm_bo_man_init(struct ttm_mem_type_manager *man, return ret; } - mtx_init(&rman->lock, "ttmrman", NULL, MTX_DEF); + lockinit(&rman->lock, "ttmrman", 0, LK_CANRECURSE); man->priv = rman; return 0; } @@ -124,16 +124,16 @@ static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; struct drm_mm *mm = &rman->mm; - mtx_lock(&rman->lock); + lockmgr(&rman->lock, LK_EXCLUSIVE); if (drm_mm_clean(mm)) { drm_mm_takedown(mm); - mtx_unlock(&rman->lock); - mtx_destroy(&rman->lock); + lockmgr(&rman->lock, LK_RELEASE); + lockuninit(&rman->lock); drm_free(rman, M_TTM_RMAN); man->priv = NULL; return 0; } - mtx_unlock(&rman->lock); + lockmgr(&rman->lock, LK_RELEASE); return -EBUSY; } @@ -142,9 +142,9 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; - mtx_lock(&rman->lock); + lockmgr(&rman->lock, LK_EXCLUSIVE); drm_mm_debug_table(&rman->mm, prefix); - mtx_unlock(&rman->lock); + lockmgr(&rman->lock, LK_RELEASE); } const struct ttm_mem_type_manager_func ttm_bo_manager_func = { diff --git a/sys/dev/drm2/ttm/ttm_bo_util.c b/sys/dev/drm2/ttm/ttm_bo_util.c index cf1b76196d..d429a57e1b 100644 --- a/sys/dev/drm2/ttm/ttm_bo_util.c +++ b/sys/dev/drm2/ttm/ttm_bo_util.c @@ -78,13 +78,14 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) return 0; if (interruptible) { - if (sx_xlock_sig(&man->io_reserve_mutex)) + if (lockmgr(&man->io_reserve_mutex, + LK_EXCLUSIVE | LK_SLEEPFAIL)) return (-EINTR); else return (0); } - sx_xlock(&man->io_reserve_mutex); + lockmgr(&man->io_reserve_mutex, LK_EXCLUSIVE); return 0; } @@ -93,7 +94,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) if (likely(man->io_reserve_fastpath)) return; - sx_xunlock(&man->io_reserve_mutex); + lockmgr(&man->io_reserve_mutex, LK_RELEASE); } static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) @@ -594,7 +595,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, void *tmp_obj = NULL; void *sync_obj_ref; - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); if (bo->sync_obj) { tmp_obj = 
bo->sync_obj; bo->sync_obj = NULL; @@ -602,7 +603,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, bo->sync_obj = driver->sync_obj_ref(sync_obj); if (evict) { ret = ttm_bo_wait(bo, false, false, false); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); if (tmp_obj) driver->sync_obj_unref(&tmp_obj); if (ret) @@ -627,7 +628,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); sync_obj_ref = bo->bdev->driver->sync_obj_ref(bo->sync_obj); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); /* ttm_buffer_object_transfer accesses bo->sync_obj */ ret = ttm_buffer_object_transfer(bo, sync_obj_ref, &ghost_obj); if (tmp_obj) diff --git a/sys/dev/drm2/ttm/ttm_bo_vm.c b/sys/dev/drm2/ttm/ttm_bo_vm.c index c73f22fb69..8828c43679 100644 --- a/sys/dev/drm2/ttm/ttm_bo_vm.c +++ b/sys/dev/drm2/ttm/ttm_bo_vm.c @@ -150,16 +150,16 @@ reserve: * move. */ - mtx_lock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { ret = ttm_bo_wait(bo, false, true, false); - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); if (unlikely(ret != 0)) { retval = VM_PAGER_ERROR; goto out_unlock; } } else - mtx_unlock(&bdev->fence_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); ret = ttm_mem_io_lock(man, true); if (unlikely(ret != 0)) { @@ -288,11 +288,11 @@ ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t s struct vm_object *vm_obj; int ret; - rw_wlock(&bdev->vm_lock); + lockmgr(&bdev->vm_lock, LK_EXCLUSIVE); bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size)); if (likely(bo != NULL)) refcount_acquire(&bo->kref); - rw_wunlock(&bdev->vm_lock); + lockmgr(&bdev->vm_lock, LK_RELEASE); if (unlikely(bo == NULL)) { kprintf("[TTM] Could not find buffer object to map\n"); diff --git a/sys/dev/drm2/ttm/ttm_execbuf_util.c b/sys/dev/drm2/ttm/ttm_execbuf_util.c index 4c500bc80a..60057e4c1a 100644 --- a/sys/dev/drm2/ttm/ttm_execbuf_util.c +++ b/sys/dev/drm2/ttm/ttm_execbuf_util.c @@ -104,9 +104,9 @@ void ttm_eu_backoff_reservation(struct list_head *list) entry = list_first_entry(list, struct ttm_validate_buffer, head); glob = entry->bo->glob; - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); ttm_eu_backoff_reservation_locked(list); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); } /* @@ -140,7 +140,7 @@ int ttm_eu_reserve_buffers(struct list_head *list) entry = list_first_entry(list, struct ttm_validate_buffer, head); glob = entry->bo->glob; - mtx_lock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); retry_locked: val_seq = entry->bo->bdev->val_seq++; @@ -155,7 +155,7 @@ retry_this_bo: case -EBUSY: ret = ttm_eu_wait_unreserved_locked(list, bo); if (unlikely(ret != 0)) { - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); ttm_eu_list_ref_sub(list); return ret; } @@ -165,13 +165,13 @@ retry_this_bo: ttm_eu_list_ref_sub(list); ret = ttm_bo_wait_unreserved_locked(bo, true); if (unlikely(ret != 0)) { - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); return ret; } goto retry_locked; default: ttm_eu_backoff_reservation_locked(list); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); ttm_eu_list_ref_sub(list); return ret; } @@ -179,14 +179,14 @@ retry_this_bo: entry->reserved = true; if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { ttm_eu_backoff_reservation_locked(list); - 
mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); ttm_eu_list_ref_sub(list); return -EBUSY; } } ttm_eu_del_from_lru_locked(list); - mtx_unlock(&glob->lru_lock); + lockmgr(&glob->lru_lock, LK_RELEASE); ttm_eu_list_ref_sub(list); return 0; @@ -208,8 +208,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) driver = bdev->driver; glob = bo->glob; - mtx_lock(&glob->lru_lock); - mtx_lock(&bdev->fence_lock); + lockmgr(&glob->lru_lock, LK_EXCLUSIVE); + lockmgr(&bdev->fence_lock, LK_EXCLUSIVE); list_for_each_entry(entry, list, head) { bo = entry->bo; @@ -218,8 +218,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) ttm_bo_unreserve_locked(bo); entry->reserved = false; } - mtx_unlock(&bdev->fence_lock); - mtx_unlock(&glob->lru_lock); + lockmgr(&bdev->fence_lock, LK_RELEASE); + lockmgr(&glob->lru_lock, LK_RELEASE); list_for_each_entry(entry, list, head) { if (entry->old_sync_obj) diff --git a/sys/dev/drm2/ttm/ttm_lock.c b/sys/dev/drm2/ttm/ttm_lock.c index 79440e1894..9c20de6e92 100644 --- a/sys/dev/drm2/ttm/ttm_lock.c +++ b/sys/dev/drm2/ttm/ttm_lock.c @@ -48,7 +48,7 @@ void ttm_lock_init(struct ttm_lock *lock) { - mtx_init(&lock->lock, "ttmlk", NULL, MTX_DEF); + lockinit(&lock->lock, "ttmlk", 0, LK_CANRECURSE); lock->rw = 0; lock->flags = 0; lock->kill_takers = false; @@ -68,10 +68,10 @@ ttm_lock_send_sig(int signo) void ttm_read_unlock(struct ttm_lock *lock) { - mtx_lock(&lock->lock); + lockmgr(&lock->lock, LK_EXCLUSIVE); if (--lock->rw == 0) wakeup(lock); - mtx_unlock(&lock->lock); + lockmgr(&lock->lock, LK_RELEASE); } static bool __ttm_read_lock(struct ttm_lock *lock) @@ -103,7 +103,7 @@ ttm_read_lock(struct ttm_lock *lock, bool interruptible) flags = 0; wmsg = "ttmr"; } - mtx_lock(&lock->lock); + lockmgr(&lock->lock, LK_EXCLUSIVE); while (!__ttm_read_lock(lock)) { ret = msleep(lock, &lock->lock, flags, wmsg, 0); if (ret != 0) @@ -147,24 +147,24 @@ int ttm_read_trylock(struct ttm_lock *lock, bool interruptible) flags = 0; wmsg = "ttmrt"; } - mtx_lock(&lock->lock); + lockmgr(&lock->lock, LK_EXCLUSIVE); while (!__ttm_read_trylock(lock, &locked)) { ret = msleep(lock, &lock->lock, flags, wmsg, 0); if (ret != 0) break; } KKASSERT(!locked || ret == 0); - mtx_unlock(&lock->lock); + lockmgr(&lock->lock, LK_RELEASE); return (locked) ? 
0 : -EBUSY; } void ttm_write_unlock(struct ttm_lock *lock) { - mtx_lock(&lock->lock); + lockmgr(&lock->lock, LK_EXCLUSIVE); lock->rw = 0; wakeup(lock); - mtx_unlock(&lock->lock); + lockmgr(&lock->lock, LK_RELEASE); } static bool __ttm_write_lock(struct ttm_lock *lock) @@ -199,7 +199,7 @@ ttm_write_lock(struct ttm_lock *lock, bool interruptible) flags = 0; wmsg = "ttmw"; } - mtx_lock(&lock->lock); + lockmgr(&lock->lock, LK_EXCLUSIVE); /* XXXKIB: linux uses __ttm_read_lock for uninterruptible sleeps */ while (!__ttm_write_lock(lock)) { ret = msleep(lock, &lock->lock, flags, wmsg, 0); @@ -209,29 +209,29 @@ ttm_write_lock(struct ttm_lock *lock, bool interruptible) break; } } - mtx_unlock(&lock->lock); + lockmgr(&lock->lock, LK_RELEASE); return (-ret); } void ttm_write_lock_downgrade(struct ttm_lock *lock) { - mtx_lock(&lock->lock); + lockmgr(&lock->lock, LK_EXCLUSIVE); lock->rw = 1; wakeup(lock); - mtx_unlock(&lock->lock); + lockmgr(&lock->lock, LK_RELEASE); } static int __ttm_vt_unlock(struct ttm_lock *lock) { int ret = 0; - mtx_lock(&lock->lock); + lockmgr(&lock->lock, LK_EXCLUSIVE); if (unlikely(!(lock->flags & TTM_VT_LOCK))) ret = -EINVAL; lock->flags &= ~TTM_VT_LOCK; wakeup(lock); - mtx_unlock(&lock->lock); + lockmgr(&lock->lock, LK_RELEASE); return ret; } @@ -276,7 +276,7 @@ int ttm_vt_lock(struct ttm_lock *lock, flags = 0; wmsg = "ttmw"; } - mtx_lock(&lock->lock); + lockmgr(&lock->lock, LK_EXCLUSIVE); while (!__ttm_vt_lock(lock)) { ret = msleep(lock, &lock->lock, flags, wmsg, 0); if (interruptible && ret != 0) { @@ -310,10 +310,10 @@ int ttm_vt_unlock(struct ttm_lock *lock) void ttm_suspend_unlock(struct ttm_lock *lock) { - mtx_lock(&lock->lock); + lockmgr(&lock->lock, LK_EXCLUSIVE); lock->flags &= ~TTM_SUSPEND_LOCK; wakeup(lock); - mtx_unlock(&lock->lock); + lockmgr(&lock->lock, LK_RELEASE); } static bool __ttm_suspend_lock(struct ttm_lock *lock) @@ -332,8 +332,8 @@ static bool __ttm_suspend_lock(struct ttm_lock *lock) void ttm_suspend_lock(struct ttm_lock *lock) { - mtx_lock(&lock->lock); + lockmgr(&lock->lock, LK_EXCLUSIVE); while (!__ttm_suspend_lock(lock)) msleep(lock, &lock->lock, 0, "ttms", 0); - mtx_unlock(&lock->lock); + lockmgr(&lock->lock, LK_RELEASE); } diff --git a/sys/dev/drm2/ttm/ttm_lock.h b/sys/dev/drm2/ttm/ttm_lock.h index 759a15acb2..40013bee42 100644 --- a/sys/dev/drm2/ttm/ttm_lock.h +++ b/sys/dev/drm2/ttm/ttm_lock.h @@ -69,7 +69,7 @@ struct ttm_lock { struct ttm_base_object base; - struct mtx lock; + struct lock lock; int32_t rw; uint32_t flags; bool kill_takers; diff --git a/sys/dev/drm2/ttm/ttm_memory.c b/sys/dev/drm2/ttm/ttm_memory.c index 493cf219c6..108ca56c77 100644 --- a/sys/dev/drm2/ttm/ttm_memory.c +++ b/sys/dev/drm2/ttm/ttm_memory.c @@ -163,20 +163,20 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, int ret; struct ttm_mem_shrink *shrink; - mtx_lock(&glob->lock); + spin_lock(&glob->spin); if (glob->shrink == NULL) goto out; while (ttm_zones_above_swap_target(glob, from_wq, extra)) { shrink = glob->shrink; - mtx_unlock(&glob->lock); + spin_unlock(&glob->spin); ret = shrink->do_shrink(shrink); - mtx_lock(&glob->lock); + spin_lock(&glob->spin); if (unlikely(ret != 0)) goto out; } out: - mtx_unlock(&glob->lock); + spin_unlock(&glob->spin); } @@ -251,7 +251,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) int i; struct ttm_mem_zone *zone; - mtx_init(&glob->lock, "ttmgz", NULL, MTX_DEF); + spin_init(&glob->spin); glob->swap_queue = taskqueue_create("ttm_swap", M_WAITOK, taskqueue_thread_enqueue, &glob->swap_queue);
taskqueue_start_threads(&glob->swap_queue, 1, PVM, "ttm swap"); @@ -307,7 +307,7 @@ static void ttm_check_swapping(struct ttm_mem_global *glob) unsigned int i; struct ttm_mem_zone *zone; - mtx_lock(&glob->lock); + spin_lock(&glob->spin); for (i = 0; i < glob->num_zones; ++i) { zone = glob->zones[i]; if (zone->used_mem > zone->swap_limit) { @@ -315,8 +315,7 @@ static void ttm_check_swapping(struct ttm_mem_global *glob) break; } } - - mtx_unlock(&glob->lock); + spin_unlock(&glob->spin); if (unlikely(needs_swapping)) taskqueue_enqueue(glob->swap_queue, &glob->work); @@ -330,14 +329,14 @@ static void ttm_mem_global_free_zone(struct ttm_mem_global *glob, unsigned int i; struct ttm_mem_zone *zone; - mtx_lock(&glob->lock); + spin_lock(&glob->spin); for (i = 0; i < glob->num_zones; ++i) { zone = glob->zones[i]; if (single_zone && zone != single_zone) continue; zone->used_mem -= amount; } - mtx_unlock(&glob->lock); + spin_unlock(&glob->spin); } void ttm_mem_global_free(struct ttm_mem_global *glob, @@ -355,7 +354,7 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob, unsigned int i; struct ttm_mem_zone *zone; - mtx_lock(&glob->lock); + spin_lock(&glob->spin); for (i = 0; i < glob->num_zones; ++i) { zone = glob->zones[i]; if (single_zone && zone != single_zone) @@ -379,7 +378,7 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob, ret = 0; out_unlock: - mtx_unlock(&glob->lock); + spin_unlock(&glob->spin); ttm_check_swapping(glob); return ret; diff --git a/sys/dev/drm2/ttm/ttm_memory.h b/sys/dev/drm2/ttm/ttm_memory.h index 8bb491d9be..54e53d935d 100644 --- a/sys/dev/drm2/ttm/ttm_memory.h +++ b/sys/dev/drm2/ttm/ttm_memory.h @@ -72,7 +72,7 @@ struct ttm_mem_global { struct ttm_mem_shrink *shrink; struct taskqueue *swap_queue; struct task work; - struct mtx lock; + struct spinlock spin; struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; unsigned int num_zones; struct ttm_mem_zone *zone_kernel; @@ -105,13 +105,13 @@ static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink, static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob, struct ttm_mem_shrink *shrink) { - mtx_lock(&glob->lock); + spin_lock(&glob->spin); if (glob->shrink != NULL) { - mtx_unlock(&glob->lock); + spin_unlock(&glob->spin); return -EBUSY; } glob->shrink = shrink; - mtx_unlock(&glob->lock); + spin_unlock(&glob->spin); return 0; } @@ -126,10 +126,10 @@ static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob, static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob, struct ttm_mem_shrink *shrink) { - mtx_lock(&glob->lock); + spin_lock(&glob->spin); KKASSERT(glob->shrink == shrink); glob->shrink = NULL; - mtx_unlock(&glob->lock); + spin_unlock(&glob->spin); } struct vm_page; diff --git a/sys/dev/drm2/ttm/ttm_object.c b/sys/dev/drm2/ttm/ttm_object.c index f5bd4ff24d..3bc5336a57 100644 --- a/sys/dev/drm2/ttm/ttm_object.c +++ b/sys/dev/drm2/ttm/ttm_object.c @@ -60,7 +60,7 @@ struct ttm_object_file { struct ttm_object_device *tdev; - struct rwlock lock; + struct lock lock; struct list_head ref_list; struct drm_open_hash ref_hash[TTM_REF_NUM]; u_int refcount; @@ -79,7 +79,7 @@ struct ttm_object_file { */ struct ttm_object_device { - struct rwlock object_lock; + struct lock object_lock; struct drm_open_hash object_hash; atomic_t object_count; struct ttm_mem_global *mem_glob; @@ -158,12 +158,12 @@ int ttm_base_object_init(struct ttm_object_file *tfile, base->ref_obj_release = ref_obj_release; base->object_type = object_type; refcount_init(&base->refcount, 1); - 
rw_init(&tdev->object_lock, "ttmbao"); - rw_wlock(&tdev->object_lock); + lockinit(&tdev->object_lock, "ttmbao", 0, LK_CANRECURSE); + lockmgr(&tdev->object_lock, LK_EXCLUSIVE); ret = drm_ht_just_insert_please(&tdev->object_hash, &base->hash, (unsigned long)base, 31, 0, 0); - rw_wunlock(&tdev->object_lock); + lockmgr(&tdev->object_lock, LK_RELEASE); if (unlikely(ret != 0)) goto out_err0; @@ -175,9 +175,9 @@ int ttm_base_object_init(struct ttm_object_file *tfile, return 0; out_err1: - rw_wlock(&tdev->object_lock); + lockmgr(&tdev->object_lock, LK_EXCLUSIVE); (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); - rw_wunlock(&tdev->object_lock); + lockmgr(&tdev->object_lock, LK_RELEASE); out_err0: return ret; } @@ -187,7 +187,7 @@ static void ttm_release_base(struct ttm_base_object *base) struct ttm_object_device *tdev = base->tfile->tdev; (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); - rw_wunlock(&tdev->object_lock); + lockmgr(&tdev->object_lock, LK_RELEASE); /* * Note: We don't use synchronize_rcu() here because it's far * too slow. It's up to the user to free the object using @@ -198,7 +198,7 @@ static void ttm_release_base(struct ttm_base_object *base) ttm_object_file_unref(&base->tfile); base->refcount_release(&base); } - rw_wlock(&tdev->object_lock); + lockmgr(&tdev->object_lock, LK_EXCLUSIVE); } void ttm_base_object_unref(struct ttm_base_object **p_base) @@ -213,10 +213,10 @@ void ttm_base_object_unref(struct ttm_base_object **p_base) * users trying to look up the object. */ - rw_wlock(&tdev->object_lock); + lockmgr(&tdev->object_lock, LK_EXCLUSIVE); if (refcount_release(&base->refcount)) ttm_release_base(base); - rw_wunlock(&tdev->object_lock); + lockmgr(&tdev->object_lock, LK_RELEASE); } struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, @@ -227,14 +227,14 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, struct drm_hash_item *hash; int ret; - rw_rlock(&tdev->object_lock); + lockmgr(&tdev->object_lock, LK_EXCLUSIVE); ret = drm_ht_find_item(&tdev->object_hash, key, &hash); if (ret == 0) { base = drm_hash_entry(hash, struct ttm_base_object, hash); refcount_acquire(&base->refcount); } - rw_runlock(&tdev->object_lock); + lockmgr(&tdev->object_lock, LK_RELEASE); if (unlikely(ret != 0)) return NULL; @@ -265,17 +265,17 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, *existed = true; while (ret == -EINVAL) { - rw_rlock(&tfile->lock); + lockmgr(&tfile->lock, LK_EXCLUSIVE); ret = drm_ht_find_item(ht, base->hash.key, &hash); if (ret == 0) { ref = drm_hash_entry(hash, struct ttm_ref_object, hash); refcount_acquire(&ref->kref); - rw_runlock(&tfile->lock); + lockmgr(&tfile->lock, LK_RELEASE); break; } - rw_runlock(&tfile->lock); + lockmgr(&tfile->lock, LK_RELEASE); ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false); if (unlikely(ret != 0)) @@ -292,19 +292,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, ref->ref_type = ref_type; refcount_init(&ref->kref, 1); - rw_wlock(&tfile->lock); + lockmgr(&tfile->lock, LK_EXCLUSIVE); ret = drm_ht_insert_item(ht, &ref->hash); if (ret == 0) { list_add_tail(&ref->head, &tfile->ref_list); refcount_acquire(&base->refcount); - rw_wunlock(&tfile->lock); + lockmgr(&tfile->lock, LK_RELEASE); if (existed != NULL) *existed = false; break; } - rw_wunlock(&tfile->lock); + lockmgr(&tfile->lock, LK_RELEASE); KKASSERT(ret == -EINVAL); ttm_mem_global_free(mem_glob, sizeof(*ref)); @@ -324,7 +324,7 @@ static void ttm_ref_object_release(struct ttm_ref_object *ref) ht = 
&tfile->ref_hash[ref->ref_type]; (void)drm_ht_remove_item(ht, &ref->hash); list_del(&ref->head); - rw_wunlock(&tfile->lock); + lockmgr(&tfile->lock, LK_RELEASE); if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) base->ref_obj_release(base, ref->ref_type); @@ -332,7 +332,7 @@ static void ttm_ref_object_release(struct ttm_ref_object *ref) ttm_base_object_unref(&ref->obj); ttm_mem_global_free(mem_glob, sizeof(*ref)); drm_free(ref, M_TTM_OBJ_REF); - rw_wlock(&tfile->lock); + lockmgr(&tfile->lock, LK_EXCLUSIVE); } int ttm_ref_object_base_unref(struct ttm_object_file *tfile, @@ -343,16 +343,16 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile, struct drm_hash_item *hash; int ret; - rw_wlock(&tfile->lock); + lockmgr(&tfile->lock, LK_EXCLUSIVE); ret = drm_ht_find_item(ht, key, &hash); if (unlikely(ret != 0)) { - rw_wunlock(&tfile->lock); + lockmgr(&tfile->lock, LK_RELEASE); return -EINVAL; } ref = drm_hash_entry(hash, struct ttm_ref_object, hash); if (refcount_release(&ref->kref)) ttm_ref_object_release(ref); - rw_wunlock(&tfile->lock); + lockmgr(&tfile->lock, LK_RELEASE); return 0; } @@ -364,7 +364,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) struct ttm_object_file *tfile = *p_tfile; *p_tfile = NULL; - rw_wlock(&tfile->lock); + lockmgr(&tfile->lock, LK_EXCLUSIVE); /* * Since we release the lock within the loop, we have to @@ -380,7 +380,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) for (i = 0; i < TTM_REF_NUM; ++i) drm_ht_remove(&tfile->ref_hash[i]); - rw_wunlock(&tfile->lock); + lockmgr(&tfile->lock, LK_RELEASE); ttm_object_file_unref(&tfile); } @@ -393,7 +393,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, int ret; tfile = kmalloc(sizeof(*tfile), M_TTM_OBJ_FILE, M_WAITOK); - rw_init(&tfile->lock, "ttmfo"); + lockinit(&tfile->lock, "ttmfo", 0, LK_CANRECURSE); tfile->tdev = tdev; refcount_init(&tfile->refcount, 1); INIT_LIST_HEAD(&tfile->ref_list); @@ -427,7 +427,7 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global tdev = kmalloc(sizeof(*tdev), M_TTM_OBJ_DEV, M_WAITOK); tdev->mem_glob = mem_glob; - rw_init(&tdev->object_lock, "ttmdo"); + lockinit(&tdev->object_lock, "ttmdo", 0, LK_CANRECURSE); atomic_set(&tdev->object_count, 0); ret = drm_ht_create(&tdev->object_hash, hash_order); @@ -444,9 +444,9 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) *p_tdev = NULL; - rw_wlock(&tdev->object_lock); + lockmgr(&tdev->object_lock, LK_EXCLUSIVE); drm_ht_remove(&tdev->object_hash); - rw_wunlock(&tdev->object_lock); + lockmgr(&tdev->object_lock, LK_RELEASE); drm_free(tdev, M_TTM_OBJ_DEV); } diff --git a/sys/dev/drm2/ttm/ttm_page_alloc.c b/sys/dev/drm2/ttm/ttm_page_alloc.c index 1710e2b449..637b57132d 100644 --- a/sys/dev/drm2/ttm/ttm_page_alloc.c +++ b/sys/dev/drm2/ttm/ttm_page_alloc.c @@ -70,7 +70,7 @@ * @npages: Number of pages in pool. */ struct ttm_page_pool { - struct mtx lock; + struct lock lock; bool fill_lock; bool dma32; struct pglist list; @@ -329,7 +329,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) M_TEMP, M_WAITOK | M_ZERO); restart: - mtx_lock(&pool->lock); + lockmgr(&pool->lock, LK_EXCLUSIVE); TAILQ_FOREACH_REVERSE_SAFE(p, &pool->list, pglist, pageq, p1) { if (freed_pages >= npages_to_free) @@ -346,7 +346,7 @@ restart: * Because changing page caching is costly * we unlock the pool to prevent stalling. 
*/ - mtx_unlock(&pool->lock); + lockmgr(&pool->lock, LK_RELEASE); ttm_pages_put(pages_to_free, freed_pages); if (likely(nr_free != FREE_ALL_PAGES)) @@ -380,7 +380,7 @@ restart: nr_free -= freed_pages; } - mtx_unlock(&pool->lock); + lockmgr(&pool->lock, LK_RELEASE); if (freed_pages) ttm_pages_put(pages_to_free, freed_pages); @@ -593,12 +593,12 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, * Can't change page caching if in irqsave context. We have to * drop the pool->lock. */ - mtx_unlock(&pool->lock); + lockmgr(&pool->lock, LK_RELEASE); TAILQ_INIT(&new_pages); r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags, ttm_flags, cstate, alloc_size); - mtx_lock(&pool->lock); + lockmgr(&pool->lock, LK_EXCLUSIVE); if (!r) { TAILQ_CONCAT(&pool->list, &new_pages, pageq); @@ -632,7 +632,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, vm_page_t p; unsigned i; - mtx_lock(&pool->lock); + lockmgr(&pool->lock, LK_EXCLUSIVE); ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count); if (count >= pool->npages) { @@ -650,7 +650,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool, pool->npages -= count; count = 0; out: - mtx_unlock(&pool->lock); + lockmgr(&pool->lock, LK_RELEASE); return count; } @@ -672,7 +672,7 @@ static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags, return; } - mtx_lock(&pool->lock); + lockmgr(&pool->lock, LK_EXCLUSIVE); for (i = 0; i < npages; i++) { if (pages[i]) { TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq); @@ -689,7 +689,7 @@ static void ttm_put_pages(vm_page_t *pages, unsigned npages, int flags, if (npages < NUM_PAGES_TO_ALLOC) npages = NUM_PAGES_TO_ALLOC; } - mtx_unlock(&pool->lock); + lockmgr(&pool->lock, LK_RELEASE); if (npages) ttm_page_pool_free(pool, npages); } @@ -773,7 +773,7 @@ static int ttm_get_pages(vm_page_t *pages, unsigned npages, int flags, static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags, char *name) { - mtx_init(&pool->lock, "ttmpool", NULL, MTX_DEF); + lockinit(&pool->lock, "ttmpool", 0, LK_CANRECURSE); pool->fill_lock = false; TAILQ_INIT(&pool->list); pool->npages = pool->nfrees = 0; -- 2.41.0
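
The conversions above follow a handful of repeating patterns. A minimal
sketch, assuming DragonFly's lockinit()/lockmgr() and spin_lock() APIs
exactly as used in the patch; demo_softc and the demo_* functions are
hypothetical stand-ins, not code from the tree:

/* Hypothetical example; only the locking calls mirror the patch. */
#include <sys/param.h>
#include <sys/lock.h>		/* struct lock, lockinit(), lockmgr() */
#include <sys/spinlock.h>	/* struct spinlock */
#include <sys/spinlock2.h>	/* spin_init(), spin_lock(), spin_unlock() */

struct demo_softc {
	struct lock	lk;	/* stands in for a former mtx, sx or rwlock */
	struct spinlock	spin;	/* stands in for a former mtx around a short section */
	int		counter;
};

static void
demo_init(struct demo_softc *sc)
{
	/* mtx_init(&m, "demo", NULL, MTX_DEF) becomes lockinit(); the
	 * patch passes LK_CANRECURSE everywhere to stay permissive. */
	lockinit(&sc->lk, "demo", 0, LK_CANRECURSE);

	/* One-argument spin_init(), as in ttm_mem_global_init(). */
	spin_init(&sc->spin);
}

static void
demo_update(struct demo_softc *sc)
{
	/*
	 * mtx_lock(), sx_xlock(), rw_wlock() -- and, in this patch, even
	 * rw_rlock() -- all become exclusive lockmgr acquisitions;
	 * shared (LK_SHARED) acquisitions are left as a later optimization.
	 */
	lockmgr(&sc->lk, LK_EXCLUSIVE);
	sc->counter++;
	/* mtx_unlock(), sx_xunlock(), rw_wunlock() become LK_RELEASE. */
	lockmgr(&sc->lk, LK_RELEASE);

	/* Spinlocks are reserved for short sections that never sleep. */
	spin_lock(&sc->spin);
	sc->counter--;
	spin_unlock(&sc->spin);
}

static void
demo_fini(struct demo_softc *sc)
{
	/* mtx_destroy() becomes lockuninit(), as in ttm_bo_man_takedown(). */
	lockuninit(&sc->lk);
}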
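
The one conversion that is not a one-for-one swap is the interruptible
acquisition in ttm_mem_io_lock(): sx_xlock_sig() is mapped onto
LK_SLEEPFAIL, under which lockmgr() returns non-zero instead of
acquiring the lock once it has had to sleep, and the TTM code reports
that as -EINTR. A restatement of that pattern, with a hypothetical
helper name:

static int
demo_lock_interruptible(struct lock *lk, bool interruptible)
{
	if (interruptible) {
		/* Fails (returns non-zero) rather than retrying if the
		 * acquisition had to sleep; mapped to -EINTR exactly as
		 * in ttm_mem_io_lock(). */
		if (lockmgr(lk, LK_EXCLUSIVE | LK_SLEEPFAIL))
			return (-EINTR);
		return (0);
	}
	lockmgr(lk, LK_EXCLUSIVE);
	return (0);
}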