/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo.  The principle is that in a linear GPU
 * ring progression, what comes after the last bo is the oldest bo we
 * allocated, and thus the first one that should no longer be in use by
 * the GPU.
 *
 * If that is not the case, we skip over the bo after last to the
 * closest done bo, if one exists.  If none exists and we are not asked
 * to block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring.
 * We just wait for any of those fences to complete.
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_sa.c 254885 2013-08-25 19:37:15Z dumbbell $
 */

#include <drm/drmP.h>
#include "radeon.h"
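
/*
 * Typical usage, as an illustrative sketch only (the buffer size,
 * alignment and domain below are made-up values, and error handling
 * is omitted):
 *
 *	struct radeon_sa_manager mgr;
 *	struct radeon_sa_bo *sa_bo;
 *	int r;
 *
 *	r = radeon_sa_bo_manager_init(rdev, &mgr, 1024 * 1024, 256,
 *				      RADEON_GEM_DOMAIN_GTT, 0);
 *	r = radeon_sa_bo_manager_start(rdev, &mgr);
 *	r = radeon_sa_bo_new(rdev, &mgr, &sa_bo, 4096, 256);
 *	... hand radeon_sa_bo_gpu_addr(sa_bo) to the GPU ...
 *	radeon_sa_bo_free(rdev, &sa_bo, fence);
 */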

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain, u32 flags)
{
	int i, r;

	lockinit(&sa_manager->wq_lock, "drm__radeon_sa_manager_wq_mtx", 0,
		 LK_CANRECURSE);
	cv_init(&sa_manager->wq, "drm__radeon_sa_manager__wq");
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, align, true,
			     domain, flags, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
	cv_destroy(&sa_manager->wq);
	lockuninit(&sa_manager->wq_lock);
}

int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo, M_DRM);
}

static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}

static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;
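
	/* Worked example: with soffset = 300 and align = 256,
	 * wasted = (256 - 300 % 256) % 256 = 212, so the allocation
	 * starts at 300 + 212 = 512; for an already aligned soffset
	 * the outer % makes wasted 0.
	 */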
	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}
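
/**
 * radeon_sa_bo_next_hole - move the hole to the next reclaimable spot
 *
 * @sa_manager: the sub-allocation manager
 * @fences: per-ring array, filled with the oldest unsignaled fence of
 *	each ring so the caller can wait on it
 * @tries: per-ring count of how often we already skipped to that ring
 *
 * Returns true if the hole was moved (wrapped back to the start of the
 * buffer, or jumped past the closest signaled allocation) and the
 * caller should retry the allocation; false if there is nothing left
 * to skip over.
 */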
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the closest sa_bo
	 * of the current last
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}
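
	/* Worked example of the wrap-around distance computed above:
	 * with a 1024 byte manager, hole soffset 900 and a signaled bo
	 * at offset 100, tmp = 100 + 1024 - 900 = 224, i.e. that bo is
	 * treated as sitting 224 bytes after the hole in ring order.
	 */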

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		 * so it's safe to remove it */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}

	return false;
}
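
/**
 * radeon_sa_bo_new - make a new sub-allocation
 *
 * @rdev: radeon device
 * @sa_manager: the sub-allocation manager to allocate from
 * @sa_bo: where to store the new sub-allocation
 * @size: number of bytes to allocate
 * @align: alignment the allocation must match
 *
 * Tries the hole after the last allocation first, skipping over
 * already-signaled allocations if needed; if nothing is free it blocks
 * on the rings' oldest fences until space opens up.  Returns 0 on
 * success.
 */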
int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), M_DRM,
			 M_WAITOK);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	lockmgr(&sa_manager->wq_lock, LK_EXCLUSIVE);
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				lockmgr(&sa_manager->wq_lock, LK_RELEASE);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		lockmgr(&sa_manager->wq_lock, LK_RELEASE);
		r = radeon_fence_wait_any(rdev, fences, false);
		lockmgr(&sa_manager->wq_lock, LK_EXCLUSIVE);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT) {
			while (!radeon_sa_event(sa_manager, size, align)) {
				r = -cv_wait_sig(&sa_manager->wq,
						 &sa_manager->wq_lock);
				if (r != 0)
					break;
			}
		}
	} while (!r);

	lockmgr(&sa_manager->wq_lock, LK_RELEASE);
	kfree(*sa_bo, M_DRM);
	*sa_bo = NULL;
	return r;
}
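
/**
 * radeon_sa_bo_free - free a sub-allocation
 *
 * @rdev: radeon device
 * @sa_bo: the sub-allocation to free; set to NULL on return
 * @fence: fence protecting the last use of the allocation, or NULL
 *
 * If @fence is not yet signaled, the allocation is parked on the fence
 * list of its ring and reclaimed once the fence signals; otherwise it
 * is removed immediately.  Waiters in radeon_sa_bo_new() are woken up.
 */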
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	lockmgr(&sa_manager->wq_lock, LK_EXCLUSIVE);
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	cv_broadcast(&sa_manager->wq);
	lockmgr(&sa_manager->wq_lock, LK_RELEASE);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif