/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/export.h>
#include <linux/wait.h>

/* XXX this should go to dma-buf driver, for now just to avoid undef */
DEFINE_WW_CLASS(reservation_ww_class);

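/*
 * Undo the reservations taken so far on every buffer in the list.
 * Caller must hold the global LRU lock.  Buffers that were taken off
 * the LRU are unreserved through the ticketed path; the others simply
 * have their reserved flag cleared and their waiters woken.
 */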
static void ttm_eu_backoff_reservation_locked(struct list_head *list,
                                              struct ww_acquire_ctx *ticket)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                if (!entry->reserved)
                        continue;

                entry->reserved = false;
                if (entry->removed) {
                        ttm_bo_unreserve_ticket_locked(bo, ticket);
                        entry->removed = false;

                } else {
                        atomic_set(&bo->reserved, 0);
                        wake_up_all(&bo->event_queue);
                }
        }
}

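/*
 * Take every reserved buffer in the list off the LRU lists, recording
 * the number of list references dropped by each removal so they can be
 * released later, outside the LRU lock.  Caller must hold the LRU lock.
 */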
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                if (!entry->reserved)
                        continue;

                if (!entry->removed) {
                        entry->put_count = ttm_bo_del_from_lru(bo);
                        entry->removed = true;
                }
        }
}

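/*
 * Release the list references accumulated by ttm_eu_del_from_lru_locked().
 * Called by the functions below after the LRU lock has been dropped.
 */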
static void ttm_eu_list_ref_sub(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                if (entry->put_count) {
                        ttm_bo_list_ref_sub(bo, entry->put_count, true);
                        entry->put_count = 0;
                }
        }
}

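/*
 * Undo a successful ttm_eu_reserve_buffers() call: unreserve every buffer
 * on the list and finish the ww_acquire context.
 */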
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
                                struct list_head *list)
{
        struct ttm_validate_buffer *entry;
        struct ttm_bo_global *glob;

        if (list_empty(list))
                return;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;
        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
        ttm_eu_backoff_reservation_locked(list, ticket);
        ww_acquire_fini(ticket);
        lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */
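
/*
 * Typical execbuf flow, as a sketch only; the driver-side names
 * "validate_list" and "fence_obj" below are placeholders, not part of
 * this file:
 *
 *        struct ww_acquire_ctx ticket;
 *        int ret;
 *
 *        ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
 *        if (ret != 0)
 *                return ret;
 *        ... validate the buffers, build and submit the command stream ...
 *        ttm_eu_fence_buffer_objects(&ticket, &validate_list, fence_obj);
 *
 * If submission fails after a successful reserve, the driver calls
 * ttm_eu_backoff_reservation(&ticket, &validate_list) instead of
 * fencing the buffers.
 */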

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                           struct list_head *list)
{
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
        int ret;

        if (list_empty(list))
                return 0;

        list_for_each_entry(entry, list, head) {
                entry->reserved = false;
                entry->put_count = 0;
                entry->removed = false;
        }

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

        ww_acquire_init(ticket, &reservation_ww_class);
        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);

retry:
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                int owned;

                /* already slowpath reserved? */
                if (entry->reserved)
                        continue;

                ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                        ttm_eu_del_from_lru_locked(list);
                        owned = lockstatus(&glob->lru_lock, curthread);
                        if (owned == LK_EXCLUSIVE)
                                lockmgr(&glob->lru_lock, LK_RELEASE);
                        ret = ttm_bo_reserve_nolru(bo, true, false,
                                                   true, ticket);
                        if (owned == LK_EXCLUSIVE)
                                lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
                        if (!ret)
                                break;

                        if (unlikely(ret != -EAGAIN))
                                goto err;

                        /* fallthrough */
                case -EAGAIN:
                        ttm_eu_backoff_reservation_locked(list, ticket);
                        lockmgr(&glob->lru_lock, LK_RELEASE);
                        ttm_eu_list_ref_sub(list);
                        ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket);
                        if (unlikely(ret != 0))
                                goto err_fini;

                        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
                        entry->reserved = true;
                        if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                                ret = -EBUSY;
                                goto err;
                        }
                        goto retry;
                default:
                        goto err;
                }

                entry->reserved = true;
                if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                        ret = -EBUSY;
                        goto err;
                }
        }

        ww_acquire_done(ticket);
        ttm_eu_del_from_lru_locked(list);
        lockmgr(&glob->lru_lock, LK_RELEASE);
        ttm_eu_list_ref_sub(list);
        return 0;

err:
        ttm_eu_backoff_reservation_locked(list, ticket);
        lockmgr(&glob->lru_lock, LK_RELEASE);
        ttm_eu_list_ref_sub(list);
err_fini:
        ww_acquire_done(ticket);
        ww_acquire_fini(ticket);
        return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

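/*
 * Attach the new sync object to every buffer on the list, unreserve the
 * buffers and end the ww_acquire context, then drop the references on
 * the sync objects that were replaced.
 */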
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                                 struct list_head *list, void *sync_obj)
{
        struct ttm_validate_buffer *entry;
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;

        if (list_empty(list))
                return;

        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        driver = bdev->driver;
        glob = bo->glob;

        lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);

        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
                entry->old_sync_obj = bo->sync_obj;
                bo->sync_obj = driver->sync_obj_ref(sync_obj);
                ttm_bo_unreserve_ticket_locked(bo, ticket);
                entry->reserved = false;
        }
        lockmgr(&bdev->fence_lock, LK_RELEASE);
        lockmgr(&glob->lru_lock, LK_RELEASE);
        ww_acquire_fini(ticket);

        list_for_each_entry(entry, list, head) {
                if (entry->old_sync_obj)
                        driver->sync_obj_unref(&entry->old_sync_obj);
        }
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);