Update drm/radeon to Linux 4.7.10 as much as possible...
sys/dev/drm/radeon/radeon_object.c (dragonfly.git)
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon.h"
#ifdef TRACE_TODO
#include "radeon_trace.h"
#endif
#include <linux/io.h>


static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

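/* Adjust the device-wide GTT/VRAM usage counters by the size of this BO;
 * sign > 0 accounts an allocation, sign < 0 a release.
 */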
static void radeon_update_memory_usage(struct radeon_bo *bo,
                                       unsigned mem_type, int sign)
{
        struct radeon_device *rdev = bo->rdev;
        u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

        switch (mem_type) {
        case TTM_PL_TT:
                if (sign > 0)
                        atomic64_add(size, &rdev->gtt_usage);
                else
                        atomic64_sub(size, &rdev->gtt_usage);
                break;
        case TTM_PL_VRAM:
                if (sign > 0)
                        atomic64_add(size, &rdev->vram_usage);
                else
                        atomic64_sub(size, &rdev->vram_usage);
                break;
        }
}

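/* TTM destroy callback: update the memory-usage accounting, drop the BO from
 * the device's GEM list, release its surface register and GEM object, and
 * free the radeon_bo itself.
 */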
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);

        radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        WARN_ON(!list_empty(&bo->va));
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &radeon_ttm_bo_destroy)
                return true;
        return false;
}

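/* Translate a RADEON_GEM_DOMAIN_* mask into the TTM placement list used by
 * ttm_bo_validate().  VRAM placements may be restricted to (or kept out of)
 * the CPU-visible aperture depending on the BO's CPU-access flags, and the
 * GTT/CPU placements pick WC, uncached or cached caching attributes from the
 * GTT flags.
 */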
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0, i;

        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM) {
                /* Try placing BOs which don't need CPU access outside of the
                 * CPU accessible part of VRAM
                 */
                if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
                    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
                        rbo->placements[c].fpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                                     TTM_PL_FLAG_UNCACHED |
                                                     TTM_PL_FLAG_VRAM;
                }

                rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                             TTM_PL_FLAG_UNCACHED |
                                             TTM_PL_FLAG_VRAM;
        }

        if (domain & RADEON_GEM_DOMAIN_GTT) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;

                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                           (rbo->rdev->flags & RADEON_IS_AGP)) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;
                } else {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_TT;
                }
        }

        if (domain & RADEON_GEM_DOMAIN_CPU) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;

                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                    rbo->rdev->flags & RADEON_IS_AGP) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;
                } else {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_SYSTEM;
                }
        }
        if (!c) {
                rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
                                             TTM_PL_FLAG_SYSTEM;
        }

        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;

        for (i = 0; i < c; ++i) {
                if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
                    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !rbo->placements[i].fpfn)
                        rbo->placements[i].lpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        rbo->placements[i].lpfn = 0;
        }

#if 0
        /*
         * Use two-ended allocation depending on the buffer size to
         * improve fragmentation quality.
         * 512kb was measured as the most optimal number.
         */
        if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
              (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) &&
            rbo->tbo.mem.size > 512 * 1024) {
                for (i = 0; i < c; i++) {
                        rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
                }
        }
#endif
}

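/* Allocate a new radeon_bo of the given size and alignment, initialize its
 * GEM object and placement, and hand it to TTM.  On success *bo_ptr holds
 * the new BO; on failure the partially constructed object is freed and a
 * negative error code is returned.
 */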
int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel, u32 domain,
                     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup2(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size_t acc_size;
        int r;

        size = ALIGN(size, PAGE_SIZE);

        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));

        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->rdev = rdev;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
                                       RADEON_GEM_DOMAIN_GTT |
                                       RADEON_GEM_DOMAIN_CPU);

        bo->flags = flags;
        /* PCI GART is always snooped */
        if (!(rdev->flags & RADEON_IS_PCIE))
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

        /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
         * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
         */
        if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

/* DragonFly is only supported on __x86_64__, which supports PAT */
#if !defined (__DragonFly__)
#ifdef CONFIG_X86_32
        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
         */
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
        /* Don't try to enable write-combining when it can't work, or things
         * may be slow
         * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
         */

#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining

        if (bo->flags & RADEON_GEM_GTT_WC)
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                bo->flags &= ~RADEON_GEM_GTT_WC;
#endif
#endif /* __DragonFly__ */

        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        lockmgr(&rdev->pm.mclk_lock, LK_SHARED);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, &radeon_ttm_bo_destroy);
        lockmgr(&rdev->pm.mclk_lock, LK_RELEASE);
        if (unlikely(r != 0)) {
                return r;
        }
        *bo_ptr = bo;

#ifdef TRACE_TODO
        trace_radeon_bo_create(bo);
#endif

        return 0;
}

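/* Map the whole BO into kernel address space via ttm_bo_kmap().  The mapping
 * is cached in bo->kptr, so repeated calls return the existing pointer.
 */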
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, 0, 0);
        return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, 0, 0);
        ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

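/* Drop a reference on *bo and clear the caller's pointer.  The final
 * reference is released through ttm_bo_unref(), which ends up in
 * radeon_ttm_bo_destroy().
 */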
void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev;
        struct radeon_bo *rbo;

        if ((rbo = *bo) == NULL)
                return;
        *bo = NULL;
        rdev = rbo->rdev;
        tbo = &rbo->tbo;
        ttm_bo_unref(&tbo);
}

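/* Pin the BO into the given domain, optionally below max_offset (0 means no
 * restriction).  If the BO is already pinned, only the pin count and the
 * offset check are updated; otherwise the placement is made non-evictable,
 * validated, and the device's pin-size accounting is adjusted.
 */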
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain_start = bo->rdev->mc.vram_start;
                        else
                                domain_start = bo->rdev->mc.gtt_start;
                        if (max_offset < (radeon_bo_gpu_offset(bo) - domain_start)) {
                                DRM_ERROR("radeon_bo_pin_restricted: "
                                    "max_offset(%ju) < "
                                    "(radeon_bo_gpu_offset(%ju) - "
                                    "domain_start(%ju)",
                                    (uintmax_t)max_offset, (uintmax_t)radeon_bo_gpu_offset(bo),
                                    (uintmax_t)domain_start);
                        }
                }

                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                /* force to pin into visible video ram */
                if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
                    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
                        bo->placements[i].lpfn =
                                bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
                if (domain == RADEON_GEM_DOMAIN_VRAM)
                        bo->rdev->vram_pin_size += radeon_bo_size(bo);
                else
                        bo->rdev->gart_pin_size += radeon_bo_size(bo);
        } else {
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        }
        return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
        return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

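/* Drop one pin reference; once the count reaches zero the BO is made
 * evictable again and the VRAM/GART pin-size accounting is updated.
 */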
int radeon_bo_unpin(struct radeon_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                        bo->rdev->vram_pin_size -= radeon_bo_size(bo);
                else
                        bo->rdev->gart_pin_size -= radeon_bo_size(bo);
        } else {
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        }
        return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (rdev->flags & RADEON_IS_IGP)) {
                if (rdev->mc.igp_sideport_enabled == false)
                        /* Useless to evict on IGP chips */
                        return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

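/* Forcibly release any GEM objects still on the device's object list,
 * warning that userspace leaked them.
 */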
void radeon_bo_force_delete(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        dev_err(rdev->dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                dev_err(rdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&bo->rdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&bo->rdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference_unlocked(&bo->gem_base);
        }
}

int radeon_bo_init(struct radeon_device *rdev)
{
        /* Add an MTRR for the VRAM */
        if (!rdev->fastfb_working) {
                rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
                                                      rdev->mc.aper_size);
        }
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                rdev->mc.mc_vram_size >> 20,
                (unsigned long long)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                        rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
        arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
        u64 real_vram_size = rdev->mc.real_vram_size;
        u64 vram_usage = atomic64_read(&rdev->vram_usage);

        /* This function is based on the current VRAM usage.
         *
         * - If all of VRAM is free, allow relocating the number of bytes that
         *   is equal to 1/4 of the size of VRAM for this IB.
         *
         * - If more than one half of VRAM is occupied, only allow relocating
         *   1 MB of data for this IB.
         *
         * - From 0 to one half of used VRAM, the threshold decreases
         *   linearly.
         *         __________________
         * 1/4 of -|\               |
         * VRAM    | \              |
         *         |  \             |
         *         |   \            |
         *         |    \           |
         *         |     \          |
         *         |      \         |
         *         |       \________|1 MB
         *         |----------------|
         *    VRAM 0 %             100 %
         *         used            used
         *
         * Note: It's a threshold, not a limit. The threshold must be crossed
         * for buffer relocations to stop, so any buffer of an arbitrary size
         * can be moved as long as the threshold isn't crossed before
         * the relocation takes place. We don't want to disable buffer
         * relocations completely.
         *
         * The idea is that buffers should be placed in VRAM at creation time
         * and TTM should only do a minimum number of relocations during
         * command submission. In practice, you need to submit at least
         * a dozen IBs to move all buffers to VRAM if they are in GTT.
         *
         * Also, things can get pretty crazy under memory pressure and actual
         * VRAM usage can change a lot, so playing safe even at 50% does
         * consistently increase performance.
         */

        u64 half_vram = real_vram_size >> 1;
        u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
        u64 bytes_moved_threshold = half_free_vram >> 1;
        return max(bytes_moved_threshold, 1024*1024ull);
}

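/* Reserve every BO on the CS reservation list and validate it into its
 * preferred domain, falling back to the allowed domains on failure.  Moves
 * are throttled by radeon_bo_get_threshold_for_moves() so a single IB cannot
 * trigger an unbounded amount of migration traffic.
 */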
int radeon_bo_list_validate(struct radeon_device *rdev,
                            struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
{
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
        int r;
        u64 bytes_moved = 0, initial_bytes_moved;
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

        r = ttm_eu_reserve_buffers(ticket, head);
        if (unlikely(r != 0)) {
                return r;
        }

        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->robj;
                if (!bo->pin_count) {
                        u32 domain = lobj->prefered_domains;
                        u32 allowed = lobj->allowed_domains;
                        u32 current_domain =
                                radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

                        /* Check if this buffer will be moved and don't move it
                         * if we have moved too many buffers for this IB already.
                         *
                         * Note that this allows moving at least one buffer of
                         * any size, because it doesn't take the current "bo"
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
                        if ((allowed & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
                                domain = current_domain;
                        }

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        if (ring == R600_RING_TYPE_UVD_INDEX)
                                radeon_uvd_force_into_uvd_segment(bo, allowed);

                        initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                        bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
                                       initial_bytes_moved;

                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS &&
                                    domain != lobj->allowed_domains) {
                                        domain = lobj->allowed_domains;
                                        goto retry;
                                }
                                ttm_eu_backoff_reservation(ticket, head);
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }
        return 0;
}

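/* Assign a hardware surface register to a tiled BO, stealing one from an
 * unpinned BO if all RADEON_GEM_MAX_SURFACES slots are in use, and program
 * it with the BO's tiling parameters.
 */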
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        KASSERT(radeon_bo_is_reserved(bo),
            ("radeon_bo_get_surface_reg: radeon_bo is not reserved"));

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

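/* Validate the requested tiling parameters (bank width/height, macro tile
 * aspect and tile split on CHIP_CEDAR and later) and store them on the BO.
 */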
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                                uint32_t tiling_flags, uint32_t pitch)
{
        struct radeon_device *rdev = bo->rdev;
        int r;

        if (rdev->family >= CHIP_CEDAR) {
                unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

                bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
                bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
                mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
                tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
                stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
                switch (bankw) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (bankh) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (mtaspect) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                if (tilesplit > 6) {
                        return -EINVAL;
                }
                if (stilesplit > 6) {
                        return -EINVAL;
                }
        }
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        KASSERT(radeon_bo_is_reserved(bo),
            ("radeon_bo_get_tiling_flags: radeon_bo is not reserved"));
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
{
        KASSERT((radeon_bo_is_reserved(bo) || force_drop),
            ("radeon_bo_check_tiling: radeon_bo is not reserved && !force_drop"));

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

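/* TTM move notification: drop any surface register for the old location,
 * invalidate VM mappings and shift the BO's size between the per-domain
 * usage counters.
 */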
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem)
{
        struct radeon_bo *rbo;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;

        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);

        /* update statistics */
        if (!new_mem)
                return;

        radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
        radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

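/* Fault-reserve notification: if the faulting BO lives in VRAM outside the
 * CPU-visible aperture, try to revalidate it into visible VRAM and fall back
 * to GTT when that fails with -ENOMEM.
 */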
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size, lpfn;
        int i, r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= rdev->mc.visible_vram_size)
                return 0;

        /* Can't move a pinned BO to visible VRAM */
        if (rbo->pin_count > 0)
                return -EINVAL;

        /* hurrah, the memory is not visible! */
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
        lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
        for (i = 0; i < rbo->placement.num_placement; i++) {
                /* Force into visible VRAM */
                if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
                        rbo->placements[i].lpfn = lpfn;
        }
        r = ttm_bo_validate(bo, &rbo->placement, false, false);
        if (unlikely(r == -ENOMEM)) {
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                return ttm_bo_validate(bo, &rbo->placement, false, false);
        } else if (unlikely(r != 0)) {
                return r;
        }

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if ((offset + size) > rdev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

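/* Reserve the BO and wait for any fence attached to it, optionally returning
 * the memory type it currently resides in.
 */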
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
        if (unlikely(r != 0))
                return r;
        lockmgr(&bo->tbo.bdev->fence_lock, LK_EXCLUSIVE);
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;
        if (bo->tbo.sync_obj)
                r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
        lockmgr(&bo->tbo.bdev->fence_lock, LK_RELEASE);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}