/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"


static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static void radeon_update_memory_usage(struct radeon_bo *bo,
                                       unsigned mem_type, int sign)
{
        struct radeon_device *rdev = bo->rdev;
        u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

        switch (mem_type) {
        case TTM_PL_TT:
                if (sign > 0)
                        atomic64_add(size, &rdev->gtt_usage);
                else
                        atomic64_sub(size, &rdev->gtt_usage);
                break;
        case TTM_PL_VRAM:
                if (sign > 0)
                        atomic64_add(size, &rdev->vram_usage);
                else
                        atomic64_sub(size, &rdev->vram_usage);
                break;
        }
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);

        radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        WARN_ON(!list_empty(&bo->va));
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

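/**
 * radeon_ttm_bo_is_radeon_bo - check whether a TTM BO is a radeon BO
 *
 * @bo: TTM buffer object to check
 *
 * Returns true if @bo was created by this driver, i.e. its destroy
 * callback is radeon_ttm_bo_destroy(), so that container_of() back to
 * a struct radeon_bo is safe.
 */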
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &radeon_ttm_bo_destroy)
                return true;
        return false;
}

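/**
 * radeon_ttm_placement_from_domain - build TTM placements from a domain mask
 *
 * @rbo: radeon BO whose placement list is (re)built
 * @domain: mask of RADEON_GEM_DOMAIN_{VRAM,GTT,CPU} bits
 *
 * Fills rbo->placements in priority order, honoring the BO's caching
 * flags (RADEON_GEM_GTT_UC/WC) and CPU access hints. Falls back to
 * system memory when no domain bit is set.
 */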
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0, i;

        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM) {
                /* Try placing BOs which don't need CPU access outside of the
                 * CPU accessible part of VRAM
                 */
                if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
                    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
                        rbo->placements[c].fpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                                     TTM_PL_FLAG_UNCACHED |
                                                     TTM_PL_FLAG_VRAM;
                }

                rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                             TTM_PL_FLAG_UNCACHED |
                                             TTM_PL_FLAG_VRAM;
        }

        if (domain & RADEON_GEM_DOMAIN_GTT) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;

                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                           (rbo->rdev->flags & RADEON_IS_AGP)) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_TT;
                } else {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_TT;
                }
        }

        if (domain & RADEON_GEM_DOMAIN_CPU) {
                if (rbo->flags & RADEON_GEM_GTT_UC) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;

                } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
                    rbo->rdev->flags & RADEON_IS_AGP) {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED |
                                TTM_PL_FLAG_SYSTEM;
                } else {
                        rbo->placements[c].fpfn = 0;
                        rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
                                                     TTM_PL_FLAG_SYSTEM;
                }
        }
        if (!c) {
                rbo->placements[c].fpfn = 0;
                rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
                                             TTM_PL_FLAG_SYSTEM;
        }

        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;

        for (i = 0; i < c; ++i) {
                if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
                    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !rbo->placements[i].fpfn)
                        rbo->placements[i].lpfn =
                                rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        rbo->placements[i].lpfn = 0;
        }
}

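/**
 * radeon_bo_create - allocate and initialize a new radeon BO
 *
 * @rdev: radeon device
 * @size: requested size in bytes, rounded up to the page size
 * @byte_align: alignment in bytes
 * @kernel: true for kernel-internal BOs; their validation is not
 * interruptible
 * @domain: initial RADEON_GEM_DOMAIN_* placement
 * @flags: RADEON_GEM_* creation flags; caching flags may be masked out
 * below on chips or configurations where they are known to misbehave
 * @sg: optional scatter/gather table for prime-imported buffers
 * @resv: optional reservation object to reuse
 * @bo_ptr: on success, points to the new BO
 *
 * Returns 0 on success or a negative error code.
 */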
int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel,
                     u32 domain, u32 flags, struct sg_table *sg,
                     struct reservation_object *resv,
                     struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size_t acc_size;
        int r;

        size = ALIGN(size, PAGE_SIZE);

        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));

        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->rdev = rdev;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
                                       RADEON_GEM_DOMAIN_GTT |
                                       RADEON_GEM_DOMAIN_CPU);

        bo->flags = flags;
        /* PCI GART is always snooped */
        if (!(rdev->flags & RADEON_IS_PCIE))
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

        /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
         * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
         */
        if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
                bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
         */
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
        /* Don't try to enable write-combining when it can't work, or things
         * may be slow
         * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
         */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
         thanks to write-combining
#endif

        if (bo->flags & RADEON_GEM_GTT_WC)
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, resv, &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
                return r;
        }
        *bo_ptr = bo;

        trace_radeon_bo_create(bo);

        return 0;
}

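/**
 * radeon_bo_kmap - map a BO into kernel address space
 *
 * @bo: BO to map; the caller must hold its reservation
 * @ptr: optional output for the kernel virtual address
 *
 * The mapping is cached in bo->kptr, so repeated calls return the same
 * address. Returns 0 on success or a negative error code.
 */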
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, 0, 0);
        return 0;
}

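/**
 * radeon_bo_kunmap - tear down the kernel mapping of a BO
 *
 * @bo: BO to unmap; a no-op if the BO was never kmapped
 */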
void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, 0, 0);
        ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

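/**
 * radeon_bo_unref - drop a reference to a BO
 *
 * @bo: pointer to the reference; set to NULL when the underlying TTM
 * object is gone, i.e. this was the last reference
 */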
void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev;

        if ((*bo) == NULL)
                return;
        rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}

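/**
 * radeon_bo_pin_restricted - pin a BO, optionally below a maximum offset
 *
 * @bo: BO to pin; the caller must hold its reservation
 * @domain: RADEON_GEM_DOMAIN_* to pin the BO into
 * @max_offset: highest acceptable end offset within the domain, 0 for
 * no restriction
 * @gpu_addr: optional output for the pinned GPU address
 *
 * Pinning is refcounted; only the first pin validates the BO with
 * TTM_PL_FLAG_NO_EVICT set. Returns 0 on success.
 */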
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain_start = bo->rdev->mc.vram_start;
                        else
                                domain_start = bo->rdev->mc.gtt_start;
                        WARN_ON_ONCE(max_offset <
                                     (radeon_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                /* force pinning into visible video RAM */
                if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
                    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
                        bo->placements[i].lpfn =
                                bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                else
                        bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
                if (domain == RADEON_GEM_DOMAIN_VRAM)
                        bo->rdev->vram_pin_size += radeon_bo_size(bo);
                else
                        bo->rdev->gart_pin_size += radeon_bo_size(bo);
        } else {
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        }
        return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
        return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

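/**
 * radeon_bo_unpin - drop one pin reference of a BO
 *
 * @bo: BO to unpin; the caller must hold its reservation
 *
 * When the pin count reaches zero, the BO is revalidated without
 * TTM_PL_FLAG_NO_EVICT so it becomes evictable again.
 */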
int radeon_bo_unpin(struct radeon_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                        bo->rdev->vram_pin_size -= radeon_bo_size(bo);
                else
                        bo->rdev->gart_pin_size -= radeon_bo_size(bo);
        } else {
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        }
        return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (rdev->flags & RADEON_IS_IGP)) {
                if (rdev->mc.igp_sideport_enabled == false)
                        /* Useless to evict on IGP chips */
                        return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        dev_err(rdev->dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                dev_err(rdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&bo->rdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&bo->rdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference_unlocked(&bo->gem_base);
        }
}

int radeon_bo_init(struct radeon_device *rdev)
{
        /* Add an MTRR for the VRAM */
        if (!rdev->fastfb_working) {
                rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
                                                      rdev->mc.aper_size);
        }
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                rdev->mc.mc_vram_size >> 20,
                (unsigned long long)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                        rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
        arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
        u64 real_vram_size = rdev->mc.real_vram_size;
        u64 vram_usage = atomic64_read(&rdev->vram_usage);

        /* This function is based on the current VRAM usage.
         *
         * - If all of VRAM is free, allow relocating the number of bytes that
         *   is equal to 1/4 of the size of VRAM for this IB.
         *
         * - If more than one half of VRAM is occupied, only allow relocating
         *   1 MB of data for this IB.
         *
         * - From 0 to one half of used VRAM, the threshold decreases
         *   linearly.
         *         __________________
         * 1/4 of -|\               |
         * VRAM    | \              |
         *         |  \             |
         *         |   \            |
         *         |    \           |
         *         |     \          |
         *         |      \         |
         *         |       \________|1 MB
         *         |----------------|
         *    VRAM 0 %             100 %
         *         used            used
         *
         * Note: It's a threshold, not a limit. The threshold must be crossed
         * for buffer relocations to stop, so any buffer of an arbitrary size
         * can be moved as long as the threshold isn't crossed before
         * the relocation takes place. We don't want to disable buffer
         * relocations completely.
         *
         * The idea is that buffers should be placed in VRAM at creation time
         * and TTM should only do a minimum number of relocations during
         * command submission. In practice, you need to submit at least
         * a dozen IBs to move all buffers to VRAM if they are in GTT.
         *
         * Also, things can get pretty crazy under memory pressure and actual
         * VRAM usage can change a lot, so playing safe even at 50% does
         * consistently increase performance.
         */

        u64 half_vram = real_vram_size >> 1;
        u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
        u64 bytes_moved_threshold = half_free_vram >> 1;
        return max(bytes_moved_threshold, 1024*1024ull);
}

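/**
 * radeon_bo_list_validate - reserve and place all BOs of a command stream
 *
 * @rdev: radeon device
 * @ticket: ww_mutex acquire context used for the reservations
 * @head: list of radeon_bo_list entries to validate
 * @ring: index of the ring the command submission targets
 *
 * Reserves every buffer on the list, then validates each unpinned one
 * into a preferred domain, throttling buffer moves with
 * radeon_bo_get_threshold_for_moves(). Backs off all reservations on
 * failure.
 */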
int radeon_bo_list_validate(struct radeon_device *rdev,
                            struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
{
        struct radeon_bo_list *lobj;
        struct list_head duplicates;
        int r;
        u64 bytes_moved = 0, initial_bytes_moved;
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

        INIT_LIST_HEAD(&duplicates);
        r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
        if (unlikely(r != 0)) {
                return r;
        }

        list_for_each_entry(lobj, head, tv.head) {
                struct radeon_bo *bo = lobj->robj;
                if (!bo->pin_count) {
                        u32 domain = lobj->prefered_domains;
                        u32 allowed = lobj->allowed_domains;
                        u32 current_domain =
                                radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

                        /* Check if this buffer will be moved and don't move it
                         * if we have moved too many buffers for this IB already.
                         *
                         * Note that this allows moving at least one buffer of
                         * any size, because it doesn't take the current "bo"
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
                        if ((allowed & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
                                domain = current_domain;
                        }

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        if (ring == R600_RING_TYPE_UVD_INDEX)
                                radeon_uvd_force_into_uvd_segment(bo, allowed);

                        initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                        bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
                                       initial_bytes_moved;

                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS &&
                                    domain != lobj->allowed_domains) {
                                        domain = lobj->allowed_domains;
                                        goto retry;
                                }
                                ttm_eu_backoff_reservation(ticket, head);
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }

        list_for_each_entry(lobj, &duplicates, tv.head) {
                lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
                lobj->tiling_flags = lobj->robj->tiling_flags;
        }

        return 0;
}

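/**
 * radeon_bo_get_surface_reg - assign a surface register to a tiled BO
 *
 * @bo: BO in question; the caller must hold its reservation
 *
 * Allocates one of the RADEON_GEM_MAX_SURFACES surface registers and
 * programs it for the BO's tiling mode, stealing a register from an
 * unpinned BO when all of them are in use. Returns -ENOMEM if none can
 * be freed.
 */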
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

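/**
 * radeon_bo_set_tiling_flags - set tiling flags and pitch of a BO
 *
 * @bo: BO to update
 * @tiling_flags: RADEON_TILING_* flags
 * @pitch: pitch in bytes
 *
 * On Evergreen (CHIP_CEDAR) and newer, the bank width/height, macro
 * tile aspect and tile split fields are validated first. Returns
 * -EINVAL for malformed flags.
 */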
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                                uint32_t tiling_flags, uint32_t pitch)
{
        struct radeon_device *rdev = bo->rdev;
        int r;

        if (rdev->family >= CHIP_CEDAR) {
                unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

                bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
                bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
                mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
                tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
                stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
                switch (bankw) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (bankh) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (mtaspect) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                if (tilesplit > 6) {
                        return -EINVAL;
                }
                if (stilesplit > 6) {
                        return -EINVAL;
                }
        }
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

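/**
 * radeon_bo_check_tiling - keep the surface register in sync with placement
 *
 * @bo: BO to check
 * @has_moved: true if the BO has just changed placement
 * @force_drop: true to release the surface register unconditionally
 *
 * Surface registers only apply while a tiled BO is resident in VRAM,
 * so acquire or release one as needed.
 */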
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
{
        if (!force_drop)
                lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

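/**
 * radeon_bo_move_notify - TTM move notification callback
 *
 * @bo: TTM BO being moved
 * @new_mem: the new placement, or NULL when the BO is being destroyed
 *
 * Drops stale tiling state, invalidates any VM mappings of the BO and
 * updates the per-domain memory usage statistics.
 */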
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem)
{
        struct radeon_bo *rbo;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;

        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);

        /* update statistics */
        if (!new_mem)
                return;

        radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
        radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

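/**
 * radeon_bo_fault_reserve_notify - TTM fault callback
 *
 * @bo: TTM BO about to be accessed through a CPU mapping
 *
 * Makes sure the faulting BO is CPU-visible: BOs beyond the visible
 * part of VRAM are revalidated into visible VRAM, falling back to GTT
 * when that fails with -ENOMEM.
 */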
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size, lpfn;
        int i, r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= rdev->mc.visible_vram_size)
                return 0;

        /* Can't move a pinned BO to visible VRAM */
        if (rbo->pin_count > 0)
                return -EINVAL;

        /* hurrah, the memory is not visible! */
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
        lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
        for (i = 0; i < rbo->placement.num_placement; i++) {
                /* Force into visible VRAM */
                if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
                        rbo->placements[i].lpfn = lpfn;
        }
        r = ttm_bo_validate(bo, &rbo->placement, false, false);
        if (unlikely(r == -ENOMEM)) {
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                return ttm_bo_validate(bo, &rbo->placement, false, false);
        } else if (unlikely(r != 0)) {
                return r;
        }

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if ((offset + size) > rdev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

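/**
 * radeon_bo_wait - wait for the fences on a BO to signal
 *
 * @bo: BO to wait for
 * @mem_type: optional output for the BO's current TTM memory type
 * @no_wait: if true, only poll instead of blocking
 *
 * Returns 0 once the BO is idle, or -EBUSY when @no_wait is set and
 * the BO is still busy.
 */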
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
        if (unlikely(r != 0))
                return r;
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;

        r = ttm_bo_wait(&bo->tbo, true, no_wait);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
                     bool shared)
{
        struct reservation_object *resv = bo->tbo.resv;

        if (shared)
                reservation_object_add_shared_fence(resv, &fence->base);
        else
                reservation_object_add_excl_fence(resv, &fence->base);
}