2 * Copyright 2003 Eric Anholt
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
20 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * $FreeBSD: src/sys/dev/drm2/drm_vm.c,v 1.1 2012/05/22 11:07:44 kib Exp $
27 * Support code for mmaping of DRM maps.
31 #include <dev/drm2/drmP.h>
32 #include <dev/drm2/drm.h>
33 #include <sys/mutex2.h>
34 #include <vm/vm_page.h>
35 #include <vm/vm_pager.h>
/*
 * NOTE(review): this chunk is a non-contiguous excerpt.  The number at the
 * start of each line is an original-file line number; gaps in that numbering
 * mean lines (braces, returns, declarations) are missing from this view, so
 * the comments below describe only what the visible lines establish.
 */
/*
 * drm_mmap -- d_mmap handler for the DRM character device.  Translates an
 * mmap byte offset into a physical page number, handed back to the pager
 * via ap->a_result (set with atop(phys) at the end of the visible code).
 */
38 drm_mmap(struct dev_mmap_args *ap)
/* Remnant of the FreeBSD d_mmap prototype this port was derived from: */
40 struct cdev *kdev, vm_ooffset_t offset, vm_paddr_t *paddr,
41 int prot, vm_memattr_t *memattr)*/
43 struct cdev *kdev = ap->a_head.a_dev;
44 vm_offset_t offset = ap->a_offset;
45 struct drm_device *dev = drm_get_device_from_kdev(kdev);
46 struct drm_file *file_priv = NULL;
48 enum drm_map_type type;
/* The caller must map through an authenticated drm_file. */
52 /* d_mmap gets called twice, we can only reference file_priv during
53 * the first call. We need to assume that if error is EBADF the
54 * call was successful and the client is authenticated.
57 file_priv = drm_find_file_by_proc(dev, curthread);
61 DRM_ERROR("Could not find authenticator!\n");
65 if (!file_priv->authenticated)
68 DRM_DEBUG("called with offset %016jx\n", offset);
/*
 * Case 1: offset falls inside the DMA buffer area -- look the page up in
 * dma->pagelist under dma_lock.
 */
69 if (dev->dma && offset < ptoa(dev->dma->page_count)) {
70 drm_device_dma_t *dma = dev->dma;
72 DRM_SPINLOCK(&dev->dma_lock);
74 if (dma->pagelist != NULL) {
75 unsigned long page = offset >> PAGE_SHIFT;
76 phys = dma->pagelist[page];
78 DRM_SPINUNLOCK(&dev->dma_lock);
81 DRM_SPINUNLOCK(&dev->dma_lock);
/*
 * Case 2: search dev->maplist for a map whose handle's high bits match
 * the requested offset's high bits (above DRM_MAP_HANDLE_SHIFT).
 */
86 /* A sequential search of a linked list is
87 fine here because: 1) there will only be
88 about 5-10 entries in the list and, 2) a
89 DRI client only has to do this mapping
90 once, so it doesn't have to be optimized
91 for performance, even if the list was a
95 TAILQ_FOREACH(map, &dev->maplist, link) {
96 if (offset >> DRM_MAP_HANDLE_SHIFT ==
97 (unsigned long)map->handle >> DRM_MAP_HANDLE_SHIFT)
/* No match: log the request and dump every map for debugging. */
102 DRM_DEBUG("Can't find map, request offset = %016jx\n", offset);
103 TAILQ_FOREACH(map, &dev->maplist, link) {
104 DRM_DEBUG("map offset = %016lx, handle = %016lx\n",
105 map->offset, (unsigned long)map->handle);
/* Restricted maps may only be mapped by the superuser. */
110 if (((map->flags & _DRM_RESTRICTED) && !DRM_SUSER(DRM_CURPROC))) {
112 DRM_DEBUG("restricted map\n");
/* Keep only the offset-within-map bits below DRM_MAP_HANDLE_SHIFT. */
118 offset = offset & ((1ULL << DRM_MAP_HANDLE_SHIFT) - 1);
/*
 * Case 3: derive the physical address from the map type.  The switch
 * header and several case labels fall in the missing lines.
 */
121 case _DRM_FRAME_BUFFER:
/* NOTE(review): 'memattr' is assigned here but in the visible code it was
 * only a parameter in the commented-out FreeBSD prototype; presumably a
 * local declaration exists in the missing lines -- confirm in full file. */
124 memattr = VM_MEMATTR_WRITE_COMBINING;
128 phys = map->offset + offset;
130 case _DRM_SCATTER_GATHER:
132 memattr = VM_MEMATTR_WRITE_COMBINING;
135 case _DRM_CONSISTENT:
137 phys = vtophys((char *)map->virtual + offset);
140 DRM_ERROR("bad map type %d\n", type);
141 return -1; /* This should never happen. */
/* Hand the physical page number back to the pager. */
144 ap->a_result = atop(phys);
/*
 * drm_mmap_single -- d_mmap_single handler: delegates GEM object mapping
 * to drm_gem_mmap_single(), passing through the mmap offset, size, VM
 * object out-pointer and requested protection.  (Surrounding braces are
 * among the lines missing from this excerpt.)
 */
149 drm_mmap_single(struct dev_mmap_single_args *ap)
151 struct cdev *kdev = ap->a_head.a_dev;
152 return drm_gem_mmap_single(kdev, ap->a_offset, ap->a_size,
153 ap->a_object, ap->a_nprot);
156 /* XXX The following is just temporary hack to replace the
157 * vm_phys_fictitious functions available on FreeBSD
/* Fixed-size table of registered fictitious physical-address ranges. */
159 #define VM_PHYS_FICTITIOUS_NSEGS 8
160 static struct vm_phys_fictitious_seg {
/* NOTE(review): the range-bound fields ('start'/'end', referenced by the
 * lookup and registration code below) fall in this excerpt's missing
 * lines. */
163 vm_page_t first_page; /* array of fake vm_page structs for the range */
164 } vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
/* Serializes registration/removal of entries in the table above. */
165 static struct mtx vm_phys_fictitious_reg_mtx = MTX_INITIALIZER;
/*
 * vm_phys_fictitious_to_vm_page -- look up the fake vm_page backing a
 * fictitious physical address 'pa'; presumably returns NULL (in the
 * missing lines) when no registered segment contains it.
 */
168 vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
170 struct vm_phys_fictitious_seg *seg;
/* Linear scan is fine: the table holds only VM_PHYS_FICTITIOUS_NSEGS
 * entries. */
175 for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
176 seg = &vm_phys_fictitious_segs[segind];
177 if (pa >= seg->start && pa < seg->end) {
/* Index into the segment's page array by page offset from its base. */
178 m = &seg->first_page[atop(pa - seg->start)];
179 KASSERT((m->flags & PG_FICTITIOUS) != 0,
180 ("%p not fictitious", m));
/*
 * page_init -- initialize one fake vm_page so it represents physical
 * address 'paddr' with memory attribute 'pat_mode'.  The page struct is
 * zeroed, then marked fictitious and fully valid.
 */
187 static void page_init(vm_page_t m, vm_paddr_t paddr, int pat_mode)
189 bzero(m, sizeof(*m));
/* NOTE(review): PG_BUSY dropped relative to the FreeBSD original below;
 * the intent is not visible in this excerpt -- confirm before changing. */
191 //m->flags = PG_BUSY | PG_FICTITIOUS;
192 m->flags = PG_FICTITIOUS;
193 m->valid = VM_PAGE_BITS_ALL;
198 m->pat_mode = pat_mode;
202 m->phys_addr = paddr;
/*
 * vm_phys_fictitious_reg_range -- register [start, end) as a fictitious
 * physical range: allocate one fake vm_page per page in the range,
 * initialize them via page_init(), then claim the first free slot in the
 * segment table under vm_phys_fictitious_reg_mtx.  The trailing kfree()
 * is the table-full failure path; the success/failure return statements
 * are among this excerpt's missing lines.
 */
206 vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end, int pat_mode)
208 struct vm_phys_fictitious_seg *seg;
213 page_count = (end - start) / PAGE_SIZE;
/* One fake vm_page per physical page in the range (kmalloc flags are on
 * a missing continuation line). */
215 fp = kmalloc(page_count * sizeof(struct vm_page), DRM_MEM_DRIVER,
218 for (i = 0; i < page_count; i++) {
219 page_init(&fp[i], start + PAGE_SIZE * i, pat_mode);
221 mtx_lock(&vm_phys_fictitious_reg_mtx);
222 for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
223 seg = &vm_phys_fictitious_segs[segind];
/* A zeroed slot (start == end == 0) is free; claim it for this range. */
224 if (seg->start == 0 && seg->end == 0) {
227 seg->first_page = fp;
228 mtx_unlock(&vm_phys_fictitious_reg_mtx);
232 mtx_unlock(&vm_phys_fictitious_reg_mtx);
/* No free slot: release the page array (error return not visible here). */
233 kfree(fp, DRM_MEM_DRIVER);