drm: Sync with FreeBSD
sys/dev/drm/ttm/ttm_execbuf_util.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_execbuf_util.c 247835 2013-03-05 09:49:34Z kib $
 **************************************************************************/

#include <dev/drm/drmP.h>
#include <dev/drm/ttm/ttm_execbuf_util.h>
#include <dev/drm/ttm/ttm_bo_driver.h>
#include <dev/drm/ttm/ttm_placement.h>

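/*
 * Undo partial reservation state with the LRU lock held: buffers that were
 * taken off the LRU are put back, the reserved flag is cleared, and any
 * thread sleeping on the buffer is woken.
 */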
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wakeup(bo);
	}
}

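/*
 * Take every reserved buffer on the list off the LRU, recording the number
 * of list references dropped in put_count so they can be released in a
 * batch by ttm_eu_list_ref_sub().
 */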
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

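/*
 * Release the list references accumulated by ttm_eu_del_from_lru_locked()
 * and reset each entry's put_count.
 */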
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

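/*
 * Release all reservations taken by a previous, possibly partial,
 * ttm_eu_reserve_buffers() call. Safe to call on an empty list.
 */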
void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_backoff_reservation_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is held for CPU access (its cpu_writers count
 * is non-zero), all reservations are backed off and -EBUSY is returned.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

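/*
 * For example: if validator A (sequence 10) holds buffer X and validator B
 * (sequence 11) holds buffer Y, and each then tries to reserve the other's
 * buffer, B sees the older sequence on X, backs off Y, and waits in the
 * slowpath for X to become unreserved, so A completes with both buffers
 * first.
 */
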
int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	val_seq = entry->bo->bdev->val_seq++;

retry_locked:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			ttm_eu_del_from_lru_locked(list);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, val_seq);
			if (!ret)
				break;

			if (unlikely(ret != -EAGAIN))
				goto err;

			/* fallthrough */
		case -EAGAIN:
			ttm_eu_backoff_reservation_locked(list);

			/*
			 * temporarily increase sequence number every retry,
			 * to prevent us from seeing our old reservation
			 * sequence when someone else reserved the buffer,
			 * but hasn't updated the seq_valid/seqno members yet.
			 */
			val_seq = entry->bo->bdev->val_seq++;

			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
			if (unlikely(ret != 0)) {
				lockmgr(&glob->lru_lock, LK_RELEASE);
				return ret;
			}
			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry_locked;
		default:
			goto err;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);

	return 0;

err:
	ttm_eu_backoff_reservation_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
	return ret;
}

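/*
 * Attach the new sync object to every buffer on the list, unreserve the
 * buffers, and drop the references the buffers held on their previous
 * sync objects.
 */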
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	lockmgr(&glob->lru_lock, LK_RELEASE);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
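
/*
 * Typical call sequence (sketch only; the 'mydrv_*' names and the fence
 * object are hypothetical, not part of this file):
 *
 *	struct list_head val_list;	// populated with
 *					// struct ttm_validate_buffer entries
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&val_list);
 *	if (ret != 0)
 *		return ret;		// nothing is left reserved on error
 *
 *	ret = mydrv_validate_and_submit(&val_list);
 *	if (ret != 0) {
 *		ttm_eu_backoff_reservation(&val_list);	// undo reservations
 *		return ret;
 *	}
 *
 *	// Publishes the fence on every buffer and unreserves them.
 *	ttm_eu_fence_buffer_objects(&val_list, mydrv_fence);
 */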