drm/ttm: convert to unified vma offset manager
sys/dev/drm/ttm/ttm_bo_util.c (dragonfly.git)
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

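/*
 * Move a buffer by rebinding its backing TTM: drop the old node
 * (falling back to TTM_PL_SYSTEM first if needed), switch the caching
 * attributes, then bind the TTM pages at the new placement.
 */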
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

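/*
 * Serialize io_mem reservations. When the driver uses a static io_mem
 * setup (io_reserve_fastpath), no locking is needed and both lock and
 * unlock degenerate to no-ops.
 */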
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible) {
                if (lockmgr(&man->io_reserve_mutex,
                            LK_EXCLUSIVE | LK_SLEEPFAIL))
                        return (-EINTR);
                else
                        return (0);
        }

        lockmgr(&man->io_reserve_mutex, LK_EXCLUSIVE);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        lockmgr(&man->io_reserve_mutex, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);

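/*
 * Free up io space by unmapping the least-recently-used buffer on the
 * manager's io_reserve LRU, so that a failed io_mem_reserve() can be
 * retried.
 */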
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);

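/*
 * Reserve io space on behalf of a VM mapping and put the buffer on the
 * manager's io_reserve LRU, so the reservation can be revoked by
 * ttm_mem_io_evict() when io space runs short.
 */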
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
                    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
                    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

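/*
 * Copy one page between two kernel-mapped io regions. Both mappings
 * come from ttm_mem_reg_ioremap(), so plain loads and stores can be
 * used here instead of ioread32()/iowrite32().
 */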
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                /* iowrite32(ioread32(srcP++), dstP++); */
                *dstP++ = *srcP++;
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm && ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        goto out1;
        }

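        /*
         * When moving within the same memory type and the regions
         * overlap, copy backwards (memmove-style) so pages are not
         * clobbered before they have been read.
         */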
        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret) {
                        /* failing here, means keep old copy as-is */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }
        cpu_mfence();
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);

        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (bo->sync_obj)
                fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        else
                fbo->sync_obj = NULL;
        lockmgr(&bdev->fence_lock, LK_RELEASE);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;
        fbo->resv = &fbo->ttm_resv;
        reservation_object_init(fbo->resv);
        ret = ww_mutex_trylock(&fbo->resv->lock);
        WARN_ON(!ret);

        /*
         * Mark the transfer object as active.
         */
        set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);

        *new_obj = fbo;
        return 0;
}

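/*
 * Translate TTM caching flags into a page protection value for the CPU
 * mapping: write-combined or uncached on x86/ia64, cache-inhibited (and
 * guarded when uncached) on powerpc, uncached on sparc/mips.
 */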
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                if (mem->placement & TTM_PL_FLAG_WC)
                        map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                  size);
                else
                        map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
                                                       size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->virtual = kmap(map->page);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page, num_pages,
                                    0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                iounmap(map->virtual);
                break;
        case ttm_bo_map_vmap:
                vunmap(map->virtual);
                break;
        case ttm_bo_map_kmap:
                kunmap(map->page);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

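/*
 * Fence an accelerated move. On eviction, wait for the move to finish
 * and free the old node directly. Otherwise transfer the old placement
 * to a ghost object that is released once @sync_obj signals, so the
 * move can be pipelined.
 */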
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);