/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_prime.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
#include <drm/drmP.h>
#include "radeon.h"
#include <drm/radeon_drm.h>

#include <linux/dma-buf.h>
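/*
 * dma-buf attachment callbacks: build a scatter/gather table from the
 * BO's TTM backing pages and map it for the importing device.
 */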
static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					       enum dma_data_direction dir)
{
	struct radeon_bo *bo = attachment->dmabuf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int npages = bo->tbo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}
static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				     struct sg_table *sg, enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}
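/*
 * Runs when the last reference to the exported dma-buf is dropped;
 * drops the GEM object reference held on behalf of the dma-buf.
 */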
static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;

	if (bo->gem_base.export_dma_buf == dma_buf) {
		DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
		bo->gem_base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&bo->gem_base);
	}
}
/* CPU access via kmap/mmap is not supported on PRIME buffers; stubs only. */
static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}

static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
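/*
 * vmap/vunmap reference-count the kernel mapping so that concurrent
 * dma_buf_vmap() users share a single ttm_bo_kmap() of the whole BO.
 */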
static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (bo->vmapping_count) {
		bo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	bo->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return bo->dma_buf_vmap.virtual;
}
static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;

	mutex_lock(&dev->struct_mutex);
	bo->vmapping_count--;
	if (bo->vmapping_count == 0) {
		ttm_bo_kunmap(&bo->dma_buf_vmap);
	}
	mutex_unlock(&dev->struct_mutex);
}
static const struct dma_buf_ops radeon_dmabuf_ops = {
	.map_dma_buf = radeon_gem_map_dma_buf,
	.unmap_dma_buf = radeon_gem_unmap_dma_buf,
	.release = radeon_gem_dmabuf_release,
	.kmap = radeon_gem_kmap,
	.kmap_atomic = radeon_gem_kmap_atomic,
	.kunmap = radeon_gem_kunmap,
	.kunmap_atomic = radeon_gem_kunmap_atomic,
	.mmap = radeon_gem_prime_mmap,
	.vmap = radeon_gem_prime_vmap,
	.vunmap = radeon_gem_prime_vunmap,
};
static int radeon_prime_create(struct drm_device *dev,
			       size_t size,
			       struct sg_table *sg,
			       struct radeon_bo **pbo)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *bo;
	int ret;

	ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
			       RADEON_GEM_DOMAIN_GTT, sg, pbo);
	if (ret)
		return ret;
	bo = *pbo;
	bo->gem_base.driver_private = bo;

	/* track the imported object alongside natively created BOs */
	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&bo->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}
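/*
 * Exporting pins the BO into GTT so the pages behind the importer's
 * scatter/gather table cannot be moved while the buffer is shared.
 */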
struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *obj,
					int flags)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret = 0;

	ret = radeon_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	/* pin buffer into GTT */
	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
	if (ret) {
		radeon_bo_unreserve(bo);
		return ERR_PTR(ret);
	}
	radeon_bo_unreserve(bo);
	return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
}
struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct radeon_bo *bo;
	int ret;

	/* self-import: just take another reference on our own GEM object */
	if (dma_buf->ops == &radeon_dmabuf_ops) {
		bo = dma_buf->priv;
		if (bo->gem_base.dev == dev) {
			drm_gem_object_reference(&bo->gem_base);
			dma_buf_put(dma_buf);
			return &bo->gem_base;
		}
	}

	/* foreign buffer: attach and wrap its pages in a new GTT BO */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
	if (ret)
		goto fail_unmap;

	bo->gem_base.import_attach = attach;
	return &bo->gem_base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}