/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <sys/sfbuf.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
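
/*
 * Generic buffer-object move helpers: CPU bounce copies
 * (ttm_bo_move_memcpy), TTM rebinds (ttm_bo_move_ttm), cleanup after
 * accelerated moves, and the kmap/ioremap helpers drivers use to get
 * temporary CPU mappings of buffer objects.
 */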
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}
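
/*
 * Move a buffer backed by a TTM page array: drop the old non-system
 * placement (if any), switch the pages to the caching mode the new
 * placement wants, then rebind unless the destination is system memory.
 */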
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
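
/*
 * The io_reserve mutex serializes the driver's io_mem_reserve() /
 * io_mem_free() callbacks when the fast path is disabled.  Both helpers
 * below are no-ops while man->io_reserve_fastpath is set.
 */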
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible) {
                if (lockmgr(&man->io_reserve_mutex,
                            LK_EXCLUSIVE | LK_SLEEPFAIL))
                        return (-EINTR);
                else
                        return (0);
        }

        mutex_lock(&man->io_reserve_mutex);
        return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
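
/*
 * Evict the least recently used entry on the manager's io_reserve LRU
 * so that a failed io_mem_reserve() can be retried.  Returns -EAGAIN
 * when there is nothing left to evict.
 */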
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);

}
EXPORT_SYMBOL(ttm_mem_io_free);
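
/*
 * The *_vm variants track, per buffer object, whether an io reservation
 * is held on behalf of CPU mappings, and keep the object on the
 * manager's io_reserve LRU so it can be reclaimed by ttm_mem_io_evict().
 */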
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}
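
/*
 * Map (and reserve) the aperture backing a memory region so the CPU can
 * address it; *virtual is left NULL for regions that are not iomem.
 * ttm_mem_reg_iounmap() tears the mapping down and drops the reservation.
 */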
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
                    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
                    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}
static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}
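
/*
 * Per-page copy helpers for ttm_bo_move_memcpy(): iomem-to-iomem,
 * iomem-to-TTM and TTM-to-iomem, mapping each TTM page with the caching
 * attributes requested by the placement before copying.
 */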
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                /* iowrite32(ioread32(srcP++), dstP++); */
                *dstP++ = *srcP++;
        return 0;
}
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
        dst = kmap_atomic_prot(d, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        else
                dst = kmap(d);
#endif
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(dst);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
        else
                kunmap(d);
#endif

        return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                pgprot_t prot)
{
        struct page *s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
        src = kmap_atomic_prot(s, prot);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        else
                src = kmap(s);
#endif
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
        kunmap_atomic(src);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
        else
                kunmap(s);
#endif

        return 0;
}
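
/*
 * Fallback bounce move: map both the old and the new placement and copy
 * the contents page by page with the CPU.  Used when no accelerated
 * copy path exists between the two placements involved.
 */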
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        /*
         * Single TTM move. NOP.
         */
        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;

        /*
         * Don't move nonexistent data. Clear destination instead.
         */
        if (old_iomap == NULL &&
            (ttm == NULL || (ttm->state == tt_unpopulated &&
                             !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                goto out2;
        }

        /*
         * TTM might be null for moves within the same region.
         */
        if (ttm && ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        goto out1;
        }

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(old_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        pgprot_t prot = ttm_io_prot(new_mem->placement,
                                                    PAGE_KERNEL);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret) {
                        /* failing here, means keep old copy as-is */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

        /*
         * On error, keep the mm node!
         */
        if (!ret)
                ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
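
/*
 * Destructor for the "ghost" objects created by
 * ttm_buffer_object_transfer(): by the time it runs the ghost only owns
 * the old node and (possibly) the old TTM, so all that is left to do is
 * free the object itself.
 */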
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo, M_DRM);
}
/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;

        fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK | M_ZERO);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        init_waitqueue_head(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);

        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (bo->sync_obj)
                fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        else
                fbo->sync_obj = NULL;
        lockmgr(&bdev->fence_lock, LK_RELEASE);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;

        /*
         * Mirror ref from kref_init() for list_kref.
         */
        set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);

        *new_obj = fbo;
        return 0;
}
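
/*
 * Translate TTM placement caching flags into page protection bits for
 * the architectures that need explicit write-combining or uncached
 * kernel mappings.
 */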
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
                pgprot_val(tmp) |= _PAGE_NO_CACHE;
                if (caching_flags & TTM_PL_FLAG_UNCACHED)
                        pgprot_val(tmp) |= _PAGE_GUARDED;
        }
#endif
#if defined(__ia64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
        if (!(caching_flags & TTM_PL_FLAG_CACHED))
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
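
/*
 * Kernel-mapping backends for ttm_bo_kmap(): ttm_bo_ioremap() maps
 * iomem placements through the device aperture, while ttm_bo_kmap_ttm()
 * maps system pages either via an sf_buf (single cached page) or a
 * contiguous kernel virtual range with the requested caching attributes.
 */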
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
                    bo->mem.bus.offset + offset, size,
                    (mem->placement & TTM_PL_FLAG_WC) ?
                    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
                map->size = size;
        }
        return (!map->virtual) ? -ENOMEM : 0;
}
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int i, ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->sf = sf_buf_alloc((struct vm_page *)map->page);
                map->virtual = (void *)sf_buf_kva(map->sf);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        ttm_io_prot(mem->placement, PAGE_KERNEL);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->num_pages = num_pages;
                map->virtual =
                    (void *)kmem_alloc_nofault(&kernel_map,
                    num_pages * PAGE_SIZE,
                    PAGE_SIZE);
                if (map->virtual != NULL) {
                        for (i = 0; i < num_pages; i++) {
                                pmap_page_set_memattr((struct vm_page *)ttm->pages[start_page +
                                    i], prot);
                        }
                        pmap_qenter((vm_offset_t)map->virtual,
                            (struct vm_page **)&ttm->pages[start_page], num_pages);
                }
        }
        return (!map->virtual) ? -ENOMEM : 0;
}
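
/*
 * ttm_bo_kmap()/ttm_bo_kunmap() are the entry points drivers use for a
 * temporary CPU mapping of a buffer object.  As an illustrative sketch
 * (not taken from this file), with the bo reserved a driver might do:
 *
 *      struct ttm_bo_kmap_obj map;
 *      bool is_iomem;
 *
 *      if (ttm_bo_kmap(bo, 0, bo->num_pages, &map) == 0) {
 *              memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), data, len);
 *              ttm_bo_kunmap(&map);
 *      }
 */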
int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                pmap_unmapdev((vm_offset_t)map->virtual, map->size);
                break;
        case ttm_bo_map_vmap:
                pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
                kmem_free(&kernel_map, (vm_offset_t)map->virtual,
                    map->num_pages * PAGE_SIZE);
                break;
        case ttm_bo_map_kmap:
                sf_buf_free(map->sf);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
        map->sf = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
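
/*
 * Finish an accelerated (GPU) move: attach the new fence to the buffer,
 * then either wait for idle (eviction path) or hand the old placement to
 * a ghost object so the hardware copy can complete asynchronously.
 */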
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);