drm: Import the ttm memory manager from FreeBSD
dragonfly.git: sys/dev/drm2/ttm/ttm_execbuf_util.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/ttm/ttm_execbuf_util.c 247835 2013-03-05 09:49:34Z kib $
 **************************************************************************/

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_execbuf_util.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

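/*
 * Undo the reservations on @list: return any buffer that was taken off
 * the LRU back onto it, clear the reservation flag and wake up anyone
 * sleeping on the buffer object. Caller must hold the global LRU lock.
 */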
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                if (!entry->reserved)
                        continue;

                if (entry->removed) {
                        ttm_bo_add_to_lru(bo);
                        entry->removed = false;
                }
                entry->reserved = false;
                atomic_set(&bo->reserved, 0);
                wakeup(bo);
        }
}

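/*
 * Take every reserved buffer on @list off the LRU, storing the count
 * returned by ttm_bo_del_from_lru() in entry->put_count so that the
 * corresponding list references can be dropped later through
 * ttm_eu_list_ref_sub(). Caller must hold the global LRU lock.
 */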
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                if (!entry->reserved)
                        continue;

                if (!entry->removed) {
                        entry->put_count = ttm_bo_del_from_lru(bo);
                        entry->removed = true;
                }
        }
}

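/*
 * Release the LRU list references accumulated in entry->put_count by
 * ttm_eu_del_from_lru_locked().
 */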
static void ttm_eu_list_ref_sub(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                if (entry->put_count) {
                        ttm_bo_list_ref_sub(bo, entry->put_count, true);
                        entry->put_count = 0;
                }
        }
}

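/*
 * Helper for the -EBUSY case in ttm_eu_reserve_buffers(): pull the
 * already-reserved buffers on @list off the LRU, then sleep until @bo
 * becomes unreserved. If the wait fails, the reservations taken so far
 * are backed off.
 */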
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
                                         struct ttm_buffer_object *bo)
{
        int ret;

        ttm_eu_del_from_lru_locked(list);
        ret = ttm_bo_wait_unreserved_locked(bo, true);
        if (unlikely(ret != 0))
                ttm_eu_backoff_reservation_locked(list);
        return ret;
}

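/*
 * Public entry point: take the global LRU lock and release all
 * reservations held on the buffers in @list.
 */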
void ttm_eu_backoff_reservation(struct list_head *list)
{
        struct ttm_validate_buffer *entry;
        struct ttm_bo_global *glob;

        if (list_empty(list))
                return;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;
        mtx_lock(&glob->lru_lock);
        ttm_eu_backoff_reservation_locked(list);
        mtx_unlock(&glob->lru_lock);
}

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

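/*
 * Illustrative usage sketch (not part of the original file): a driver's
 * execbuf path would typically build a list of ttm_validate_buffer
 * entries, reserve them all, validate and submit, then fence them with
 * ttm_eu_fence_buffer_objects() below. The "mydrv_*" helpers are
 * hypothetical placeholders, not TTM API.
 *
 *      struct list_head list;
 *      struct ttm_validate_buffer bufs[NUM_BUFS];
 *      void *fence;
 *      int i, ret;
 *
 *      INIT_LIST_HEAD(&list);
 *      for (i = 0; i < NUM_BUFS; i++) {
 *              bufs[i].bo = mydrv_lookup_bo(i);        // hypothetical
 *              list_add_tail(&bufs[i].head, &list);
 *      }
 *
 *      ret = ttm_eu_reserve_buffers(&list);
 *      if (ret != 0)
 *              return ret;             // on error, nothing stays reserved
 *
 *      ret = mydrv_validate_and_submit(&list);         // hypothetical
 *      if (ret != 0) {
 *              ttm_eu_backoff_reservation(&list);
 *              return ret;
 *      }
 *
 *      fence = mydrv_create_fence();   // hypothetical sync object
 *      ttm_eu_fence_buffer_objects(&list, fence);
 */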
int ttm_eu_reserve_buffers(struct list_head *list)
{
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
        int ret;
        uint32_t val_seq;

        if (list_empty(list))
                return 0;

        list_for_each_entry(entry, list, head) {
                entry->reserved = false;
                entry->put_count = 0;
                entry->removed = false;
        }

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

        mtx_lock(&glob->lru_lock);
retry_locked:
        val_seq = entry->bo->bdev->val_seq++;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

retry_this_bo:
                ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                        ret = ttm_eu_wait_unreserved_locked(list, bo);
                        if (unlikely(ret != 0)) {
                                mtx_unlock(&glob->lru_lock);
                                ttm_eu_list_ref_sub(list);
                                return ret;
                        }
                        goto retry_this_bo;
                case -EAGAIN:
                        ttm_eu_backoff_reservation_locked(list);
                        ttm_eu_list_ref_sub(list);
                        ret = ttm_bo_wait_unreserved_locked(bo, true);
                        if (unlikely(ret != 0)) {
                                mtx_unlock(&glob->lru_lock);
                                return ret;
                        }
                        goto retry_locked;
                default:
                        ttm_eu_backoff_reservation_locked(list);
                        mtx_unlock(&glob->lru_lock);
                        ttm_eu_list_ref_sub(list);
                        return ret;
                }

                entry->reserved = true;
                if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                        ttm_eu_backoff_reservation_locked(list);
                        mtx_unlock(&glob->lru_lock);
                        ttm_eu_list_ref_sub(list);
                        return -EBUSY;
                }
        }

        ttm_eu_del_from_lru_locked(list);
        mtx_unlock(&glob->lru_lock);
        ttm_eu_list_ref_sub(list);

        return 0;
}

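/*
 * Attach @sync_obj as the new fence of every buffer on @list and
 * unreserve the buffers. The old sync objects are unreferenced only
 * after both the fence lock and the LRU lock have been dropped.
 */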
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
        struct ttm_validate_buffer *entry;
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;

        if (list_empty(list))
                return;

        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        driver = bdev->driver;
        glob = bo->glob;

        mtx_lock(&glob->lru_lock);
        mtx_lock(&bdev->fence_lock);

        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
                entry->old_sync_obj = bo->sync_obj;
                bo->sync_obj = driver->sync_obj_ref(sync_obj);
                ttm_bo_unreserve_locked(bo);
                entry->reserved = false;
        }
        mtx_unlock(&bdev->fence_lock);
        mtx_unlock(&glob->lru_lock);

        list_for_each_entry(entry, list, head) {
                if (entry->old_sync_obj)
                        driver->sync_obj_unref(&entry->old_sync_obj);
        }
}