/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $FreeBSD: src/sys/dev/drm2/drm_bufs.c,v 1.1 2012/05/22 11:07:44 kib Exp $
 */

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */
#include <bus/pci/pcireg.h>
/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	struct resource *res;
	int rid;

	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	if (dev->pcir[resource] != NULL) {
		/* Already allocated. */
		return 0;
	}

	rid = PCIR_BAR(resource);
	res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid,
	    RF_SHAREABLE);
	if (res == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	if (dev->pcir[resource] == NULL) {
		dev->pcirid[resource] = rid;
		dev->pcir[resource] = res;
	}

	return 0;
}
unsigned long drm_get_resource_start(struct drm_device *dev,
				     unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
				   unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}
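
/*
 * Example usage (a hypothetical sketch, not part of the original file):
 * a driver attach routine might look up its register BAR and hand the
 * range to drm_addmap().  The BAR index, map type, and flags below are
 * illustrative assumptions.
 *
 *	drm_local_map_t *map;
 *	unsigned long base = drm_get_resource_start(dev, 0);
 *	unsigned long len  = drm_get_resource_len(dev, 0);
 *	int err = drm_addmap(dev, base, len, _DRM_REGISTERS,
 *	    _DRM_READ_ONLY, &map);
 *
 * Both helpers return 0 when the underlying BAR cannot be allocated, so
 * callers should sanity-check the values before using them.
 */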
int drm_addmap(struct drm_device * dev, unsigned long offset,
    unsigned long size,
    enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
	struct drm_local_map *map;
	struct drm_map_list *entry;
	unsigned long align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);
	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		list_for_each_entry(entry, &dev->maplist, head) {
			if (entry->map->type == type && (entry->map->offset == offset ||
			    (entry->map->type == _DRM_SHM &&
			    entry->map->flags == _DRM_CONTAINS_LOCK))) {
				entry->map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				map = entry->map;
				goto done;
			}
		}
	}

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = kmalloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (map == NULL)
		return ENOMEM;

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;
	map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) <<
	    DRM_MAP_HANDLE_SHIFT);
	switch (map->type) {
	case _DRM_REGISTERS:
		map->virtual = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->virtual = kmalloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->virtual);
		if (map->virtual == NULL) {
			drm_free(map, DRM_MEM_MAPS);
			return ENOMEM;
		}
		map->offset = (unsigned long)map->virtual;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				drm_free(map->virtual, DRM_MEM_MAPS);
				drm_free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->virtual; /* Pointer to lock */
		}
		break;
	case _DRM_AGP:
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			drm_free(map, DRM_MEM_MAPS);
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (dev->sg == NULL) {
			drm_free(map, DRM_MEM_MAPS);
			return EINVAL;
		}
		map->virtual = (void *)(dev->sg->vaddr + offset);
		map->offset = dev->sg->vaddr + offset;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			drm_free(map, DRM_MEM_MAPS);
			return ENOMEM;
		}
		map->virtual = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		drm_free(map, DRM_MEM_MAPS);
		return EINVAL;
	}

	entry = kmalloc(sizeof(*entry), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (entry == NULL) {
		drm_free(map, DRM_MEM_MAPS);
		return ENOMEM;
	}
	entry->map = map;
	list_add(&entry->head, &dev->maplist);

done:
	/* Jumped to, with lock held, when a kernel map is found. */
	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK(dev);
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK(dev);
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = (void *)map->handle;

	return 0;
}
void drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	int found = 0;

	DRM_LOCK_ASSERT(dev);

	if (map == NULL)
		return;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			list_del(&r_list->head);
			drm_free(r_list, DRM_MEM_DRIVER);
			found = 1;
			break;
		}
	}

	if (!found)
		return;

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		drm_free(map->virtual, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	free_unr(dev->map_unrhdr, (unsigned long)map->handle >>
	    DRM_MAP_HANDLE_SHIFT);

	drm_free(map, DRM_MEM_MAPS);
}
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;

	DRM_LOCK(dev);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		DRM_UNLOCK(dev);
		return EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		DRM_UNLOCK(dev);
		return 0;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK(dev);

	return 0;
}
static void drm_cleanup_buf_error(struct drm_device *dev,
				  drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		drm_free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			drm_free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		drm_free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid;*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);
	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/
	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}
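
/*
 * Worked example of the sizing math used by the addbufs paths (illustrative
 * numbers, not from the original source): with PAGE_SIZE 4096 (PAGE_SHIFT
 * 12) and request->size == 65536,
 *
 *	order      = drm_order(65536)        = 16
 *	size       = 1 << order              = 65536
 *	page_order = order - PAGE_SHIFT      = 4
 *	total      = PAGE_SIZE << page_order = 65536
 *
 * so each buffer occupies one size-sized slot (alignment == size unless
 * _DRM_PAGE_ALIGN rounds it up), and byte_count advances by
 * PAGE_SIZE << page_order per buffer.
 */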
static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = kmalloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		drm_free(temp_pagelist, DRM_MEM_PAGES);
		drm_free(entry->seglist, DRM_MEM_SEGS);
		drm_free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;
	while (entry->buf_count < count) {
		spin_unlock(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		spin_lock(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = kmalloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper
				 * amount.
				 */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}
	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	drm_free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}
static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);
	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}
int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}
int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}
int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}
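
/*
 * Userland view of the dispatch above (a hypothetical sketch; the request
 * values are illustrative, not from this file): DRM_IOCTL_ADD_BUFS carries
 * a struct drm_buf_desc, and the kernel writes back the count and size it
 * actually created.
 *
 *	struct drm_buf_desc desc;
 *	memset(&desc, 0, sizeof(desc));
 *	desc.count = 32;
 *	desc.size  = 65536;
 *	desc.flags = _DRM_AGP_BUFFER;	// omit for PCI, _DRM_SG_BUFFER for SG
 *	desc.agp_start = 0;
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
 *		printf("got %d buffers of %d bytes\n", desc.count, desc.size);
 */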
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	spin_lock(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
	    request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	spin_lock(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		spin_unlock(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	spin_unlock(&dev->dma_lock);

	return 0;
}
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	spin_lock(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	spin_unlock(&dev->dma_lock);

	return retcode;
}
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	spin_lock(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = (unsigned long)map->handle;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset;	/* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
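
/*
 * Userland counterpart (hypothetical sketch; COUNT is an assumed constant
 * at least as large as the kernel's buffer count): DRM_IOCTL_MAP_BUFS
 * fills request->virtual with the base of the new mapping and one
 * struct drm_buf_pub per buffer.
 *
 *	struct drm_buf_pub list[COUNT];
 *	struct drm_buf_map bm;
 *	memset(&bm, 0, sizeof(bm));
 *	bm.count = COUNT;
 *	bm.list  = list;
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) == 0)
 *		;	// list[i].address now points into the mapping
 *
 * Note the request->count < dma->buf_count check above: if the caller's
 * array is too small, the ioctl maps nothing and only reports the
 * required count back in request->count.
 */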
/*
 * Compute order.  Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;

	if (size == 0)
		return 0;

	order = flsl(size) - 1;
	if (size & ~(1ul << order))
		order++;

	return order;
}
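
/*
 * Worked examples (illustrative, not part of the original file):
 * drm_order() returns the smallest order such that (1 << order) >= size,
 * i.e. the ceiling of log2(size):
 *
 *	drm_order(1)    == 0
 *	drm_order(4096) == 12	(exact power of two)
 *	drm_order(4097) == 13	(rounds up to 8192)
 *
 * The addbufs paths rely on this when they compute size = 1 << order and
 * page_order = order - PAGE_SHIFT (clamped at zero).
 */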