/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/export.h>
#include <linux/wait.h>

static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}

static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;
		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_backoff_reservation_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */
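
/*
 * Worked example (illustrative, not part of the original comment):
 * validator A holds bo1 and now needs bo2, while validator B holds bo2
 * and needs bo1. Without a tie-breaker both would wait on each other
 * forever. Under the scheme above, the validator that started later
 * (the higher val_seq) backs off, releasing all of its reservations,
 * and waits on the contended buffer via the slowpath, so the earlier
 * validator can always complete.
 */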
int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int owned, ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	val_seq = entry->bo->bdev->val_seq++;

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			ttm_eu_del_from_lru_locked(list);
			owned = lockstatus(&glob->lru_lock, curthread);
			if (owned == LK_EXCLUSIVE)
				lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, val_seq);
			if (owned == LK_EXCLUSIVE)
				lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			if (!ret)
				break;
			if (unlikely(ret != -EAGAIN))
				goto err;
			/* fallthrough */
		case -EAGAIN:
			ttm_eu_backoff_reservation_locked(list);

			/*
			 * temporarily increase sequence number every retry,
			 * to prevent us from seeing our old reservation
			 * sequence when someone else reserved the buffer,
			 * but hasn't updated the seq_valid/seqno members yet.
			 */
			val_seq = entry->bo->bdev->val_seq++;

			lockmgr(&glob->lru_lock, LK_RELEASE);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
			if (unlikely(ret != 0))
				return ret;
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		default:
			goto err;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	ttm_eu_backoff_reservation_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
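
/*
 * Usage sketch (assumptions: 'validate_list' is a driver-owned list of
 * struct ttm_validate_buffer entries, and driver_validate() is a
 * hypothetical stand-in for the driver's own placement/validation step):
 *
 *	ret = ttm_eu_reserve_buffers(&validate_list);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = driver_validate(&validate_list);
 *	if (unlikely(ret != 0)) {
 *		ttm_eu_backoff_reservation(&validate_list);
 *		return ret;
 *	}
 */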

void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
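
/*
 * Continuing the sketch above, once the command stream has been submitted
 * a driver would typically fence the reserved buffers ('fence' being a
 * hypothetical driver-created sync object):
 *
 *	ttm_eu_fence_buffer_objects(&validate_list, (void *)fence);
 *
 * After this call each buffer carries the new fence as its sync_obj, has
 * been returned to the LRU and unreserved, and any previous sync objects
 * have been unreferenced.
 */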