/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <sys/sfbuf.h>
#include <linux/export.h>
#include <linux/wait.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
        ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        if (old_mem->mem_type != TTM_PL_SYSTEM) {
                /* Unbind from the old placement and release its node. */
                ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
                old_mem->mem_type = TTM_PL_SYSTEM;
        }

        ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
        if (unlikely(ret != 0))
                return ret;

        if (new_mem->mem_type != TTM_PL_SYSTEM) {
                ret = ttm_tt_bind(ttm, new_mem);
                if (unlikely(ret != 0))
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

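/*
 * Example (sketch): a driver's bo_move callback typically picks
 * ttm_bo_move_ttm() for SYSTEM <-> TT transitions, which only rebind
 * page tables, and falls back to ttm_bo_move_memcpy() otherwise.
 * "example_bo_move" is a hypothetical driver function, not part of TTM.
 */
#if 0
static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
                           bool interruptible, bool no_wait_gpu,
                           struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        /* Bind/unbind only; no data needs to be copied. */
        if ((old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT) ||
            (old_mem->mem_type == TTM_PL_TT &&
             new_mem->mem_type == TTM_PL_SYSTEM))
                return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);

        /* Anything else: let the CPU copy the contents. */
        return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
#endif
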
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
        if (likely(man->io_reserve_fastpath))
                return 0;

        if (interruptible) {
                if (lockmgr(&man->io_reserve_mutex,
                            LK_EXCLUSIVE | LK_SLEEPFAIL))
                        return (-EINTR);
                else
                        return (0);
        }

        lockmgr(&man->io_reserve_mutex, LK_EXCLUSIVE);
        return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
        if (likely(man->io_reserve_fastpath))
                return;

        lockmgr(&man->io_reserve_mutex, LK_RELEASE);
}

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
        struct ttm_buffer_object *bo;

        if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
                return -EAGAIN;

        bo = list_first_entry(&man->io_reserve_lru,
                              struct ttm_buffer_object,
                              io_reserve_lru);
        list_del_init(&bo->io_reserve_lru);
        ttm_bo_unmap_virtual_locked(bo);

        return 0;
}

static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                              struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (!bdev->driver->io_mem_reserve)
                return 0;
        if (likely(man->io_reserve_fastpath))
                return bdev->driver->io_mem_reserve(bdev, mem);

        if (bdev->driver->io_mem_reserve &&
            mem->bus.io_reserved_count++ == 0) {
retry:
                ret = bdev->driver->io_mem_reserve(bdev, mem);
                if (ret == -EAGAIN) {
                        /* Evict another bo's io mapping and try again. */
                        ret = ttm_mem_io_evict(man);
                        if (ret == 0)
                                goto retry;
                }
        }
        return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
                            struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (likely(man->io_reserve_fastpath))
                return;

        if (bdev->driver->io_mem_reserve &&
            --mem->bus.io_reserved_count == 0 &&
            bdev->driver->io_mem_free)
                bdev->driver->io_mem_free(bdev, mem);
}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;
        int ret;

        if (!mem->bus.io_reserved_vm) {
                struct ttm_mem_type_manager *man =
                        &bo->bdev->man[mem->mem_type];

                ret = ttm_mem_io_reserve(bo->bdev, mem);
                if (unlikely(ret != 0))
                        return ret;
                mem->bus.io_reserved_vm = true;
                if (man->use_io_reserve_lru)
                        list_add_tail(&bo->io_reserve_lru,
                                      &man->io_reserve_lru);
        }
        return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (mem->bus.io_reserved_vm) {
                mem->bus.io_reserved_vm = false;
                list_del_init(&bo->io_reserve_lru);
                ttm_mem_io_free(bo->bdev, mem);
        }
}

int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                        void **virtual)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        int ret;
        void *addr;

        *virtual = NULL;
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bdev, mem);
        ttm_mem_io_unlock(man);
        if (ret || !mem->bus.is_iomem)
                return ret;

        if (mem->bus.addr) {
                addr = mem->bus.addr;
        } else {
                addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
                    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
                    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
                if (!addr) {
                        (void) ttm_mem_io_lock(man, false);
                        ttm_mem_io_free(bdev, mem);
                        ttm_mem_io_unlock(man);
                        return -ENOMEM;
                }
        }
        *virtual = addr;
        return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
                         void *virtual)
{
        struct ttm_mem_type_manager *man;

        man = &bdev->man[mem->mem_type];

        if (virtual && mem->bus.addr == NULL)
                pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bdev, mem);
        ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
        int i;

        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                /* iowrite32(ioread32(srcP++), dstP++); */
                *dstP++ = *srcP++;
        return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                                unsigned long page,
                                vm_memattr_t prot)
{
        vm_page_t d = ttm->pages[page];
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

        /* XXXKIB can't sleep ? */
        dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);

        pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);

        return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                                unsigned long page,
                                vm_memattr_t prot)
{
        vm_page_t s = ttm->pages[page];
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);

        pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);

        return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_tt *ttm = bo->ttm;
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        /* TTM might be null for moves within the same region. */
        if (ttm && ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret) {
                        /* if we fail here don't nuke the mm node
                         * as the bo still owns it */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }

        /* Copy backwards when source and destination may overlap. */
        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->start < old_mem->start + old_mem->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL) {
                        vm_memattr_t prot = ttm_io_prot(old_mem->placement);
                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
                                                   prot);
                } else if (new_iomap == NULL) {
                        vm_memattr_t prot = ttm_io_prot(new_mem->placement);
                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                if (ret) {
                        /* failing here, means keep old copy as-is */
                        old_copy.mm_node = NULL;
                        goto out1;
                }
        }
out2:
        old_copy = *old_mem;
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                ttm_tt_unbind(ttm);
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
        ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
        ttm_bo_mem_put(bo, &old_copy);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

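/*
 * Example (sketch): ttm_bo_move_memcpy() is the universal fallback when
 * a hardware copy engine is unavailable or fails. "example_accel_copy"
 * is a hypothetical driver helper, not part of TTM.
 */
#if 0
static int example_move_vram(struct ttm_buffer_object *bo, bool evict,
                             bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        int ret;

        ret = example_accel_copy(bo, new_mem);  /* hypothetical blit */
        if (ret)
                /* Copy engine unusable: fall back to the CPU path. */
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
        return ret;
}
#endif
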
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
        /* The ghost object was allocated with kmalloc(..., M_DRM, ...). */
        kfree(bo, M_DRM);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
                                      struct ttm_buffer_object **new_obj)
{
        struct ttm_buffer_object *fbo;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;

        fbo = kmalloc(sizeof(*fbo), M_DRM, M_WAITOK | M_ZERO);

        *fbo = *bo;

        /**
         * Fix up members that we shouldn't copy directly:
         * TODO: Explicit member copy would probably be better here.
         */

        init_waitqueue_head(&fbo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);

        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (bo->sync_obj)
                fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
        else
                fbo->sync_obj = NULL;
        lockmgr(&bdev->fence_lock, LK_RELEASE);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
        fbo->acc_size = 0;

        /*
         * Mirror ref from kref_init() for list_kref.
         */
        set_bit(TTM_BO_PRIV_FLAG_ACTIVE, &fbo->priv_flags);

        *new_obj = fbo;
        return 0;
}

vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__i386__) || defined(__x86_64__)
        if (caching_flags & TTM_PL_FLAG_WC)
                return (VM_MEMATTR_WRITE_COMBINING);
        else
                /*
                 * We do not support i386 here; see the Linux source
                 * for the reasoning behind this fallback.
                 */
                return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                          unsigned long offset,
                          unsigned long size,
                          struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;

        if (bo->mem.bus.addr) {
                map->bo_kmap_type = ttm_bo_map_premapped;
                map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
        } else {
                map->bo_kmap_type = ttm_bo_map_iomap;
                map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
                    bo->mem.bus.offset + offset, size,
                    (mem->placement & TTM_PL_FLAG_WC) ?
                    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
                map->size = size;
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long start_page,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_reg *mem = &bo->mem;
        vm_memattr_t prot;
        struct ttm_tt *ttm = bo->ttm;
        int i, ret;

        BUG_ON(!ttm);

        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
                if (ret)
                        return ret;
        }

        if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = ttm_bo_map_kmap;
                map->page = ttm->pages[start_page];
                map->sf = sf_buf_alloc(map->page);
                map->virtual = (void *)sf_buf_kva(map->sf);
        } else {
                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */
                prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
                        VM_MEMATTR_WRITE_BACK :
                        ttm_io_prot(mem->placement);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->num_pages = num_pages;
                map->virtual = (void *)kmem_alloc_nofault(&kernel_map,
                    num_pages * PAGE_SIZE, PAGE_SIZE);
                if (map->virtual != NULL) {
                        for (i = 0; i < num_pages; i++) {
                                /* XXXKIB hack */
                                pmap_page_set_memattr(ttm->pages[start_page +
                                    i], prot);
                        }
                        pmap_qenter((vm_offset_t)map->virtual,
                            &ttm->pages[start_page], num_pages);
                }
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
                unsigned long start_page, unsigned long num_pages,
                struct ttm_bo_kmap_obj *map)
{
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];
        unsigned long offset, size;
        int ret;

        BUG_ON(!list_empty(&bo->swap));
        map->virtual = NULL;
        map->bo = bo;
        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
        ttm_mem_io_unlock(man);
        if (ret)
                return ret;
        if (!bo->mem.bus.is_iomem) {
                return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                offset = start_page << PAGE_SHIFT;
                size = num_pages << PAGE_SHIFT;
                return ttm_bo_ioremap(bo, offset, size, map);
        }
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
        struct ttm_buffer_object *bo = map->bo;
        struct ttm_mem_type_manager *man =
                &bo->bdev->man[bo->mem.mem_type];

        if (!map->virtual)
                return;
        switch (map->bo_kmap_type) {
        case ttm_bo_map_iomap:
                pmap_unmapdev((vm_offset_t)map->virtual, map->size);
                break;
        case ttm_bo_map_vmap:
                pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
                kmem_free(&kernel_map, (vm_offset_t)map->virtual,
                    map->num_pages * PAGE_SIZE);
                break;
        case ttm_bo_map_kmap:
                sf_buf_free(map->sf);
                break;
        case ttm_bo_map_premapped:
                break;
        default:
                BUG();
        }
        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
        ttm_mem_io_unlock(man);
        map->virtual = NULL;
        map->page = NULL;
        map->sf = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

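/*
 * Example (sketch): mapping one page of a buffer object for CPU access
 * with ttm_bo_kmap()/ttm_bo_kunmap(). "example_write_u32" is a
 * hypothetical helper, not part of TTM.
 */
#if 0
static int example_write_u32(struct ttm_buffer_object *bo,
                             unsigned long page, uint32_t value)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        int ret;

        ret = ttm_bo_kmap(bo, page, 1, &map);
        if (ret)
                return ret;
        /* ttm_kmap_obj_virtual() also reports whether this is iomem. */
        *(uint32_t *)ttm_kmap_obj_virtual(&map, &is_iomem) = value;
        ttm_bo_kunmap(&map);
        return 0;
}
#endif
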
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
                              bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;
        struct ttm_buffer_object *ghost_obj;
        void *tmp_obj = NULL;

        lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
        if (bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
                if (ret)
                        return ret;

                if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
                    (bo->ttm != NULL)) {
                        ttm_tt_unbind(bo->ttm);
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                ttm_bo_free_old_node(bo);
        } else {
                /**
                 * This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                lockmgr(&bdev->fence_lock, LK_RELEASE);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);

                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;

                /**
                 * If we're not moving to fixed memory, the TTM object
                 * needs to stay alive. Otherwise hang it on the ghost
                 * bo to be unbound and destroyed.
                 */

                if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
                        ghost_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                ttm_bo_unreserve(ghost_obj);
                ttm_bo_unref(&ghost_obj);
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
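
/*
 * Example (sketch): after queueing a GPU blit, a driver hands the
 * resulting fence (sync_obj) to ttm_bo_move_accel_cleanup(), which
 * either waits (on eviction) or parks the old placement on a ghost
 * object until the fence signals. "example_emit_copy" is a hypothetical
 * driver helper, not part of TTM.
 */
#if 0
static int example_move_blit(struct ttm_buffer_object *bo, bool evict,
                             bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        void *fence;
        int ret;

        ret = example_emit_copy(bo, &bo->mem, new_mem, &fence);
        if (ret)
                return ret;

        return ttm_bo_move_accel_cleanup(bo, fence, evict,
                                         no_wait_gpu, new_mem);
}
#endif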