2 * Legacy: Generic DRM Buffer Management
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
9 * Author: Gareth Hughes <gareth@valinux.com>
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
31 #include <linux/vmalloc.h>
32 #include <linux/log2.h>
33 #include <linux/export.h>
34 #include <asm/shmparam.h>
36 #include "drm_legacy.h"
/*
 * drm_find_matching_map() - search dev->maplist for an existing entry that
 * matches @map (same type, same master, compatible offset).
 *
 * NOTE(review): this capture elides interior lines of the original file
 * (braces/returns missing); code is left byte-identical, comments only added.
 */
39 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
40 struct drm_local_map *map)
42 struct drm_map_list *entry;
43 list_for_each_entry(entry, &dev->maplist, head) {
45 * Because the kernel-userspace ABI is fixed at a 32-bit offset
46 * while PCI resources may live above that, we only compare the
47 * lower 32 bits of the map offset for maps of type
48 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
49 * It is assumed that if a driver have more than one resource
50 * of each type, the lower 32 bits are different.
/* Skip entries of a different type or owned by another master. */
53 map->type != entry->map->type ||
54 entry->master != dev->primary->master)
/* SHM maps only match when the candidate carries the lock flag. */
58 if (map->flags != _DRM_CONTAINS_LOCK)
/* Framebuffer/register maps: compare only the low 32 bits (see above). */
62 case _DRM_FRAME_BUFFER:
63 if ((entry->map->offset & 0xffffffff) ==
64 (map->offset & 0xffffffff))
66 default: /* Make gcc happy */
/* All other types require an exact offset match. */
69 if (entry->map->offset == map->offset)
/*
 * drm_map_handle() - derive the user-visible token/hash handle for a map.
 * NOTE(review): the body is entirely elided in this capture — behavior
 * cannot be documented from here; confirm against the full source.
 */
76 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
77 unsigned long user_token, int hashed_handle, int shm)
83 * Core function to create a range of memory available for mapping by a
86 * Adjusts the memory offset to its absolute value according to the mapping
87 * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
88 * applicable and if supported by the kernel.
/*
 * NOTE(review): interior lines are elided in this capture (error returns,
 * braces, some switch labels missing); code left byte-identical.
 */
90 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
91 unsigned int size, enum drm_map_type type,
92 enum drm_map_flags flags,
93 struct drm_map_list ** maplist)
95 struct drm_local_map *map;
96 struct drm_map_list *list = NULL;
97 drm_dma_handle_t *dmah;
99 /* Allocate a new map structure, fill it in, and do any type-specific
100 * initialization necessary.
102 map = kmalloc(sizeof(*map), M_DRM, M_ZERO | M_WAITOK | M_NULLOK);
106 map->offset = offset;
111 /* Only allow shared memory to be removable since we only keep enough
112 * book keeping information about shared memory to allow for removal
113 * when processes fork.
115 if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
/* Reject unaligned ranges and ranges that wrap the address space. */
119 if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
120 DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
121 (uintmax_t)offset, size);
125 if (offset + size < offset) {
126 DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
127 (uintmax_t)offset, size);
132 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
133 (unsigned long long)map->offset, map->size, map->type);
135 /* Check if this is just another version of a kernel-allocated map, and
136 * just hand that back if so.
138 if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
140 list_for_each_entry(list, &dev->maplist, head) {
141 if (list->map->type == type && (list->map->offset == offset ||
142 (list->map->type == _DRM_SHM &&
143 list->map->flags == _DRM_CONTAINS_LOCK))) {
144 list->map->size = size;
145 DRM_DEBUG("Found kernel map %d\n", type);
/* Registers/framebuffer: set up write-combining and map the MMIO range. */
155 case _DRM_FRAME_BUFFER:
157 if (map->type == _DRM_FRAME_BUFFER ||
158 (map->flags & _DRM_WRITE_COMBINING)) {
160 arch_phys_wc_add(map->offset, map->size);
162 if (map->type == _DRM_REGISTERS) {
163 if (map->flags & _DRM_WRITE_COMBINING)
164 map->handle = ioremap_wc(map->offset,
167 map->handle = ioremap(map->offset, map->size);
/* SHM: back the map with pageable kernel memory visible to userspace. */
176 map->handle = vmalloc_user(map->size);
177 DRM_DEBUG("%lu %d %p\n",
178 map->size, order_base_2(map->size), map->handle);
183 map->offset = (unsigned long)map->handle;
184 if (map->flags & _DRM_CONTAINS_LOCK) {
185 /* Prevent a 2nd X Server from creating a 2nd lock */
186 if (dev->lock.hw_lock != NULL) {
191 dev->lock.hw_lock = map->handle; /* Pointer to lock */
201 /* In some cases (i810 driver), user space may have already
202 * added the AGP base itself, because dev->agp->base previously
203 * only got set during AGP enable. So, only add the base
204 * address if the map's offset isn't already within the
207 if (map->offset < dev->agp->base ||
208 map->offset > dev->agp->base +
209 dev->agp->agp_info.ai_aperture_size - 1) {
210 map->offset += dev->agp->base;
212 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
213 /*for (entry = dev->agp->memory; entry; entry = entry->next) {
214 if ((map->offset >= entry->bound) &&
215 (map->offset + map->size <=
216 entry->bound + entry->pages * PAGE_SIZE)) {
/* Scatter-gather: offset is relative to the SG area's kernel vaddr. */
227 case _DRM_SCATTER_GATHER:
232 map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
233 map->offset = dev->sg->vaddr + offset;
235 case _DRM_CONSISTENT:
236 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
237 * As we're limiting the address to 2^32-1 (or less),
238 * casting it down to 32 bits is no problem, but we
239 * need to point to a 64bit variable first. */
240 dmah = drm_pci_alloc(dev, map->size, map->size);
245 map->handle = dmah->vaddr;
246 map->offset = (unsigned long)dmah->busaddr;
250 DRM_ERROR("Bad map type %d\n", map->type);
/* Track the new map; unmap registers again if list allocation failed. */
255 list = kzalloc(sizeof(*list), GFP_KERNEL);
257 if (map->type == _DRM_REGISTERS)
258 iounmap(map->handle);
264 mutex_lock(&dev->struct_mutex);
265 list_add(&list->head, &dev->maplist);
266 mutex_unlock(&dev->struct_mutex);
269 /* Jumped to, with lock held, when a kernel map is found. */
271 DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
/*
 * drm_legacy_addmap() - public wrapper around drm_addmap_core() that hands
 * back the created drm_local_map via @map_ptr.
 * NOTE(review): error-path lines are elided in this capture.
 */
279 int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
280 unsigned int size, enum drm_map_type type,
281 enum drm_map_flags flags, struct drm_local_map **map_ptr)
283 struct drm_map_list *list;
286 rc = drm_addmap_core(dev, offset, size, type, flags, &list);
288 *map_ptr = list->map;
291 EXPORT_SYMBOL(drm_legacy_addmap);
294 * Ioctl to specify a range of memory that is available for mapping by a
297 * \param inode device inode.
298 * \param file_priv DRM file private.
299 * \param cmd command.
300 * \param arg pointer to a drm_map structure.
301 * \return zero on success or a negative value on error.
/*
 * NOTE(review): interior lines elided in this capture (declarations/returns
 * missing); code left byte-identical, comments only.
 */
304 int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
305 struct drm_file *file_priv)
307 struct drm_map *request = data;
308 drm_local_map_t *map;
/* Require a read/write open; only _DRM_AGP is allowed without CAP_SYS_ADMIN. */
311 if (!(dev->flags & (FREAD|FWRITE)))
312 return -EACCES; /* Require read/write */
314 if (!capable(CAP_SYS_ADMIN) && request->type != _DRM_AGP)
318 err = drm_legacy_addmap(dev, request->offset, request->size, request->type,
319 request->flags, &map);
/* Copy the resulting map description back to userspace. */
324 request->offset = map->offset;
325 request->size = map->size;
326 request->type = map->type;
327 request->flags = map->flags;
328 request->mtrr = map->mtrr;
329 request->handle = (void *)map->handle;
335 * Get a mapping information.
337 * \param inode device inode.
338 * \param file_priv DRM file private.
339 * \param cmd command.
340 * \param arg user argument, pointing to a drm_map structure.
342 * \return zero on success or a negative number on failure.
344 * Searches for the mapping with the specified offset and copies its information
/*
 * NOTE(review): interior lines elided in this capture (index matching and
 * returns missing); code left byte-identical, comments only.
 */
347 int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
348 struct drm_file *file_priv)
350 struct drm_map *map = data;
351 struct drm_map_list *r_list = NULL;
352 struct list_head *list;
/* Legacy-only ioctl: refuse on pure-KMS drivers. */
356 if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
357 drm_core_check_feature(dev, DRIVER_MODESET))
365 mutex_lock(&dev->struct_mutex);
366 list_for_each(list, &dev->maplist) {
368 r_list = list_entry(list, struct drm_map_list, head);
373 if (!r_list || !r_list->map) {
374 mutex_unlock(&dev->struct_mutex);
/* Copy the located map's description out; handle is the user token. */
378 map->offset = r_list->map->offset;
379 map->size = r_list->map->size;
380 map->type = r_list->map->type;
381 map->flags = r_list->map->flags;
382 map->handle = (void *)(unsigned long) r_list->user_token;
383 map->mtrr = r_list->map->mtrr;
385 mutex_unlock(&dev->struct_mutex);
391 * Remove a map private from list and deallocate resources if the mapping
394 * Searches the map on drm_device::maplist, removes it from the list, see if
395 * its being used, and free any associate resource (such as MTRR's) if it's not
398 * \sa drm_legacy_addmap
/*
 * NOTE(review): interior lines elided in this capture (hash removal, kfree,
 * returns missing). Caller must hold dev->struct_mutex (see drm_legacy_rmmap).
 */
400 int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
402 struct drm_map_list *r_list = NULL, *list_t;
403 drm_dma_handle_t dmah;
406 /* Find the list entry for the map and remove it */
407 list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
408 if (r_list->map == map) {
409 list_del(&r_list->head);
/* Type-specific teardown: unmap MMIO, drop MTRR, free DMA memory. */
421 drm_legacy_ioremapfree(map, dev);
423 case _DRM_FRAME_BUFFER:
424 arch_phys_wc_del(map->mtrr);
430 case _DRM_SCATTER_GATHER:
432 case _DRM_CONSISTENT:
/* Rebuild a dma handle on the stack so the PCI-free helper can use it. */
433 dmah.vaddr = map->handle;
434 dmah.busaddr = map->offset;
435 dmah.size = map->size;
436 __drm_legacy_pci_free(dev, &dmah);
/*
 * drm_legacy_rmmap() - locking wrapper for drm_legacy_rmmap_locked().
 * Refused on pure-KMS drivers (legacy interface only).
 */
444 int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
448 if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
449 drm_core_check_feature(dev, DRIVER_MODESET))
452 mutex_lock(&dev->struct_mutex);
453 ret = drm_legacy_rmmap_locked(dev, map);
454 mutex_unlock(&dev->struct_mutex);
458 EXPORT_SYMBOL(drm_legacy_rmmap);
460 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
461 * the last close of the device, and this is necessary for cleanup when things
462 * exit uncleanly. Therefore, having userland manually remove mappings seems
463 * like a pointless exercise since they're going away anyway.
465 * One use case might be after addmap is allowed for normal users for SHM and
466 * gets used by drivers that the server doesn't need to care about. This seems
469 * \param inode device inode.
470 * \param file_priv DRM file private.
471 * \param cmd command.
472 * \param arg pointer to a struct drm_map structure.
473 * \return zero on success or a negative value on error.
/*
 * NOTE(review): interior lines elided in this capture; code byte-identical.
 */
475 int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
476 struct drm_file *file_priv)
478 struct drm_map *request = data;
479 struct drm_local_map *map = NULL;
480 struct drm_map_list *r_list;
483 if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
484 drm_core_check_feature(dev, DRIVER_MODESET))
/* Look up the map by its user token; only _DRM_REMOVABLE maps qualify. */
487 mutex_lock(&dev->struct_mutex);
488 list_for_each_entry(r_list, &dev->maplist, head) {
490 r_list->user_token == (unsigned long)request->handle &&
491 r_list->map->flags & _DRM_REMOVABLE) {
497 /* List has wrapped around to the head pointer, or its empty we didn't
500 if (list_empty(&dev->maplist) || !map) {
501 mutex_unlock(&dev->struct_mutex);
505 /* Register and framebuffer maps are permanent */
506 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
507 mutex_unlock(&dev->struct_mutex);
511 ret = drm_legacy_rmmap_locked(dev, map);
513 mutex_unlock(&dev->struct_mutex);
519 * Cleanup after an error on one of the addbufs() functions.
521 * \param dev DRM device.
522 * \param entry buffer entry where the error occurred.
524 * Frees any pages and buffers associated with the given entry.
/*
 * NOTE(review): interior lines elided in this capture; code byte-identical.
 */
526 static void drm_cleanup_buf_error(struct drm_device * dev,
527 struct drm_buf_entry * entry)
/* Release every DMA segment, then the segment list itself. */
531 if (entry->seg_count) {
532 for (i = 0; i < entry->seg_count; i++) {
533 drm_pci_free(dev, entry->seglist[i]);
535 kfree(entry->seglist);
537 entry->seg_count = 0;
/* Release per-buffer private data, then the buffer list itself. */
540 if (entry->buf_count) {
541 for (i = 0; i < entry->buf_count; i++) {
542 kfree(entry->buflist[i].dev_private);
544 kfree(entry->buflist);
546 entry->buf_count = 0;
/*
 * drm_do_addbufs_agp() - carve DMA buffers out of AGP aperture space for the
 * size order requested and append them to dev->dma.
 * NOTE(review): interior lines elided in this capture (declarations, braces,
 * returns missing); code byte-identical, comments only.
 */
550 static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
552 struct drm_device_dma *dma = dev->dma;
553 struct drm_buf_entry *entry;
554 /* struct drm_agp_mem *agp_entry; */
557 unsigned long offset;
558 unsigned long agp_offset;
567 struct drm_buf **temp_buflist;
/* Derive allocation geometry from the request. */
569 count = request->count;
570 order = order_base_2(request->size);
573 alignment = (request->flags & _DRM_PAGE_ALIGN)
574 ? round_page(size) : size;
575 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
576 total = PAGE_SIZE << page_order;
579 agp_offset = dev->agp->base + request->agp_start;
581 DRM_DEBUG("count: %d\n", count);
582 DRM_DEBUG("order: %d\n", order);
583 DRM_DEBUG("size: %d\n", size);
584 DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
585 DRM_DEBUG("alignment: %d\n", alignment);
586 DRM_DEBUG("page_order: %d\n", page_order);
587 DRM_DEBUG("total: %d\n", total);
589 /* Make sure buffers are located in AGP memory that we own */
590 /* Breaks MGA due to drm_alloc_agp not setting up entries for the
591 * memory. Safe to ignore for now because these ioctls are still
595 for (agp_entry = dev->agp->memory; agp_entry;
596 agp_entry = agp_entry->next) {
597 if ((agp_offset >= agp_entry->bound) &&
598 (agp_offset + total * count <=
599 agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
605 DRM_DEBUG("zone invalid\n");
609 entry = &dma->bufs[order];
611 entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
612 M_WAITOK | M_NULLOK | M_ZERO);
613 if (!entry->buflist) {
617 entry->buf_size = size;
618 entry->page_order = page_order;
/* Populate one drm_buf per slice of the AGP window. */
622 while (entry->buf_count < count) {
623 buf = &entry->buflist[entry->buf_count];
624 buf->idx = dma->buf_count + entry->buf_count;
625 buf->total = alignment;
629 buf->offset = (dma->byte_count + offset);
630 buf->bus_address = agp_offset + offset;
631 buf->address = (void *)(agp_offset + offset);
634 buf->file_priv = NULL;
636 buf->dev_priv_size = dev->driver->dev_priv_size;
637 buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
638 M_WAITOK | M_NULLOK | M_ZERO);
639 if (buf->dev_private == NULL) {
640 /* Set count correctly so we free the proper amount. */
641 entry->buf_count = count;
642 drm_cleanup_buf_error(dev, entry);
648 byte_count += PAGE_SIZE << page_order;
651 DRM_DEBUG("byte_count: %d\n", byte_count);
/* Grow the device-wide buffer list and splice in the new buffers. */
653 temp_buflist = krealloc(dma->buflist,
654 (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
655 M_DRM, M_WAITOK | M_NULLOK);
656 if (temp_buflist == NULL) {
657 /* Free the entry because it isn't valid */
658 drm_cleanup_buf_error(dev, entry);
661 dma->buflist = temp_buflist;
663 for (i = 0; i < entry->buf_count; i++) {
664 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
667 dma->buf_count += entry->buf_count;
668 dma->byte_count += byte_count;
670 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
671 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
673 request->count = entry->buf_count;
674 request->size = size;
676 dma->flags = _DRM_DMA_USE_AGP;
/*
 * drm_do_addbufs_pci() - allocate consistent PCI DMA segments for the
 * requested size order, slice them into drm_bufs, and extend dev->dma's
 * buffer/page bookkeeping.
 * NOTE(review): interior lines elided in this capture (declarations, braces,
 * returns missing); code byte-identical, comments only.
 */
681 static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
683 struct drm_device_dma *dma = dev->dma;
689 struct drm_buf_entry *entry;
690 drm_dma_handle_t *dmah;
693 unsigned long offset;
697 unsigned long *temp_pagelist;
698 struct drm_buf **temp_buflist;
700 count = request->count;
701 order = order_base_2(request->size);
704 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
705 request->count, request->size, size, order);
707 alignment = (request->flags & _DRM_PAGE_ALIGN)
708 ? round_page(size) : size;
709 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
710 total = PAGE_SIZE << page_order;
712 entry = &dma->bufs[order];
714 entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
715 M_WAITOK | M_NULLOK | M_ZERO);
716 entry->seglist = kmalloc(count * sizeof(*entry->seglist), M_DRM,
717 M_WAITOK | M_NULLOK | M_ZERO);
719 /* Keep the original pagelist until we know all the allocations
722 temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
723 sizeof(*dma->pagelist),
724 M_DRM, M_WAITOK | M_NULLOK);
/* Any allocation failure: free everything acquired so far and bail. */
726 if (entry->buflist == NULL || entry->seglist == NULL ||
727 temp_pagelist == NULL) {
728 kfree(temp_pagelist);
729 kfree(entry->seglist);
730 kfree(entry->buflist);
734 memcpy(temp_pagelist, dma->pagelist, dma->page_count *
735 sizeof(*dma->pagelist));
737 DRM_DEBUG("pagelist: %d entries\n",
738 dma->page_count + (count << page_order));
740 entry->buf_size = size;
741 entry->page_order = page_order;
/* Allocate one DMA segment per buffer and record its pages. */
745 while (entry->buf_count < count) {
746 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
749 /* Set count correctly so we free the proper amount. */
750 entry->buf_count = count;
751 entry->seg_count = count;
752 drm_cleanup_buf_error(dev, entry);
753 kfree(temp_pagelist);
756 entry->seglist[entry->seg_count++] = dmah;
757 for (i = 0; i < (1 << page_order); i++) {
758 DRM_DEBUG("page %d @ 0x%08lx\n",
759 dma->page_count + page_count,
760 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
761 temp_pagelist[dma->page_count + page_count++]
762 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
/* Carve the segment into aligned buffers. */
765 offset + size <= total && entry->buf_count < count;
766 offset += alignment, ++entry->buf_count) {
767 buf = &entry->buflist[entry->buf_count];
768 buf->idx = dma->buf_count + entry->buf_count;
769 buf->total = alignment;
772 buf->offset = (dma->byte_count + byte_count + offset);
773 buf->address = ((char *)dmah->vaddr + offset);
774 buf->bus_address = dmah->busaddr + offset;
777 buf->file_priv = NULL;
779 buf->dev_priv_size = dev->driver->dev_priv_size;
780 buf->dev_private = kmalloc(buf->dev_priv_size,
782 M_WAITOK | M_NULLOK |
784 if (buf->dev_private == NULL) {
785 /* Set count correctly so we free the proper amount. */
786 entry->buf_count = count;
787 entry->seg_count = count;
788 drm_cleanup_buf_error(dev, entry);
789 kfree(temp_pagelist);
793 DRM_DEBUG("buffer %d @ %p\n",
794 entry->buf_count, buf->address);
796 byte_count += PAGE_SIZE << page_order;
/* Grow the device-wide buffer list; on failure, roll everything back. */
799 temp_buflist = krealloc(dma->buflist,
800 (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
801 M_DRM, M_WAITOK | M_NULLOK);
802 if (temp_buflist == NULL) {
803 /* Free the entry because it isn't valid */
804 drm_cleanup_buf_error(dev, entry);
805 kfree(temp_pagelist);
808 dma->buflist = temp_buflist;
810 for (i = 0; i < entry->buf_count; i++) {
811 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
814 /* No allocations failed, so now we can replace the original pagelist
817 kfree(dma->pagelist);
818 dma->pagelist = temp_pagelist;
820 dma->buf_count += entry->buf_count;
821 dma->seg_count += entry->seg_count;
822 dma->page_count += entry->seg_count << page_order;
823 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
825 request->count = entry->buf_count;
826 request->size = size;
/*
 * drm_do_addbufs_sg() - carve DMA buffers out of the device's scatter-gather
 * area (offsets relative to dev->sg->vaddr) and append them to dev->dma.
 * NOTE(review): interior lines elided in this capture (declarations, braces,
 * returns missing); code byte-identical, comments only.
 */
832 static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
834 struct drm_device_dma *dma = dev->dma;
835 struct drm_buf_entry *entry;
837 unsigned long offset;
838 unsigned long agp_offset;
847 struct drm_buf **temp_buflist;
849 count = request->count;
850 order = order_base_2(request->size);
853 alignment = (request->flags & _DRM_PAGE_ALIGN)
854 ? round_page(size) : size;
855 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
856 total = PAGE_SIZE << page_order;
859 agp_offset = request->agp_start;
861 DRM_DEBUG("count: %d\n", count);
862 DRM_DEBUG("order: %d\n", order);
863 DRM_DEBUG("size: %d\n", size);
864 DRM_DEBUG("agp_offset: %ld\n", agp_offset);
865 DRM_DEBUG("alignment: %d\n", alignment);
866 DRM_DEBUG("page_order: %d\n", page_order);
867 DRM_DEBUG("total: %d\n", total);
869 entry = &dma->bufs[order];
871 entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
872 M_WAITOK | M_NULLOK | M_ZERO);
873 if (entry->buflist == NULL)
876 entry->buf_size = size;
877 entry->page_order = page_order;
/* Populate one drm_buf per slice of the SG window. */
881 while (entry->buf_count < count) {
882 buf = &entry->buflist[entry->buf_count];
883 buf->idx = dma->buf_count + entry->buf_count;
884 buf->total = alignment;
888 buf->offset = (dma->byte_count + offset);
889 buf->bus_address = agp_offset + offset;
/* Kernel-visible address is SG base plus the buffer's offset. */
890 buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
893 buf->file_priv = NULL;
895 buf->dev_priv_size = dev->driver->dev_priv_size;
896 buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
897 M_WAITOK | M_NULLOK | M_ZERO);
898 if (buf->dev_private == NULL) {
899 /* Set count correctly so we free the proper amount. */
900 entry->buf_count = count;
901 drm_cleanup_buf_error(dev, entry);
905 DRM_DEBUG("buffer %d @ %p\n",
906 entry->buf_count, buf->address);
910 byte_count += PAGE_SIZE << page_order;
913 DRM_DEBUG("byte_count: %d\n", byte_count);
/* Grow the device-wide buffer list and splice in the new buffers. */
915 temp_buflist = krealloc(dma->buflist,
916 (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
917 M_DRM, M_WAITOK | M_NULLOK);
918 if (temp_buflist == NULL) {
919 /* Free the entry because it isn't valid */
920 drm_cleanup_buf_error(dev, entry);
923 dma->buflist = temp_buflist;
925 for (i = 0; i < entry->buf_count; i++) {
926 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
929 dma->buf_count += entry->buf_count;
930 dma->byte_count += byte_count;
932 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
933 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
935 request->count = entry->buf_count;
936 request->size = size;
938 dma->flags = _DRM_DMA_USE_SG;
944 * Add AGP buffers for DMA transfers.
946 * \param dev struct drm_device to which the buffers are to be added.
947 * \param request pointer to a struct drm_buf_desc describing the request.
948 * \return zero on success or a negative number on failure.
950 * After some sanity checks creates a drm_buf structure for each buffer and
951 * reallocates the buffer list of the same size order to accommodate the new
/*
 * NOTE(review): interior lines elided in this capture; code byte-identical.
 */
954 int drm_legacy_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
/* Validate count and size order before touching device state. */
958 if (request->count < 0 || request->count > 4096)
961 order = order_base_2(request->size);
962 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
966 /* No more allocations after first buffer-using ioctl. */
967 if (dev->buf_use != 0) {
970 /* No more than one allocation per order */
971 if (dev->dma->bufs[order].buf_count != 0) {
975 ret = drm_do_addbufs_agp(dev, request);
/*
 * drm_legacy_addbufs_sg() - validate an SG addbufs request (privilege,
 * count, order, allocation state) then delegate to drm_do_addbufs_sg().
 * NOTE(review): interior lines elided in this capture; code byte-identical.
 */
980 static int drm_legacy_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
984 if (!capable(CAP_SYS_ADMIN))
987 if (request->count < 0 || request->count > 4096)
990 order = order_base_2(request->size);
991 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
/* Guard against racing buffer users; mark an allocation in progress. */
994 spin_lock(&dev->buf_lock);
996 spin_unlock(&dev->buf_lock);
999 atomic_inc(&dev->buf_alloc);
1000 spin_unlock(&dev->buf_lock);
1002 /* No more than one allocation per order */
1003 if (dev->dma->bufs[order].buf_count != 0) {
1007 ret = drm_do_addbufs_sg(dev, request);
/*
 * drm_legacy_addbufs_pci() - validate a PCI addbufs request (privilege,
 * count, order, allocation state) then delegate to drm_do_addbufs_pci().
 * NOTE(review): interior lines elided in this capture; code byte-identical.
 */
1012 int drm_legacy_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
1016 if (!capable(CAP_SYS_ADMIN))
1019 if (request->count < 0 || request->count > 4096)
1022 order = order_base_2(request->size);
1023 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
/* Guard against racing buffer users; mark an allocation in progress. */
1026 spin_lock(&dev->buf_lock);
1028 spin_unlock(&dev->buf_lock);
1031 atomic_inc(&dev->buf_alloc);
1032 spin_unlock(&dev->buf_lock);
1034 /* No more allocations after first buffer-using ioctl. */
1035 if (dev->buf_use != 0) {
1038 /* No more than one allocation per order */
1039 if (dev->dma->bufs[order].buf_count != 0) {
1043 ret = drm_do_addbufs_pci(dev, request);
1049 * Add buffers for DMA transfers (ioctl).
1051 * \param inode device inode.
1052 * \param file_priv DRM file private.
1053 * \param cmd command.
1054 * \param arg pointer to a struct drm_buf_desc request.
1055 * \return zero on success or a negative number on failure.
1057 * According with the memory type specified in drm_buf_desc::flags and the
1058 * build options, it dispatches the call either to addbufs_agp(),
1059 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1060 * PCI memory respectively.
/*
 * NOTE(review): interior lines elided in this capture; code byte-identical.
 */
1062 int drm_legacy_addbufs(struct drm_device *dev, void *data,
1063 struct drm_file *file_priv)
1065 struct drm_buf_desc *request = data;
/* Dispatch on buffer flags: AGP, then SG, falling back to PCI. */
1068 if (request->flags & _DRM_AGP_BUFFER)
1069 err = drm_legacy_addbufs_agp(dev, request);
1070 else if (request->flags & _DRM_SG_BUFFER)
1071 err = drm_legacy_addbufs_sg(dev, request);
1073 err = drm_legacy_addbufs_pci(dev, request);
1079 * Get information about the buffer mappings.
1081 * This was originally mean for debugging purposes, or by a sophisticated
1082 * client library to determine how best to use the available buffers (e.g.,
1083 * large buffers can be used for image transfer).
1085 * \param inode device inode.
1086 * \param file_priv DRM file private.
1087 * \param cmd command.
1088 * \param arg pointer to a drm_buf_info structure.
1089 * \return zero on success or a negative number on failure.
1091 * Increments drm_device::buf_use while holding the drm_device::buf_lock
1092 * lock, preventing of allocating more buffers after this call. Information
1093 * about each requested buffer is then copied into user space.
/*
 * NOTE(review): interior lines elided in this capture; code byte-identical.
 */
1095 int drm_legacy_infobufs(struct drm_device *dev, void *data,
1096 struct drm_file *file_priv)
1098 struct drm_device_dma *dma = dev->dma;
1099 struct drm_buf_info *request = data;
1103 if (drm_core_check_feature(dev, DRIVER_MODESET))
1106 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
/* Refuse while an allocation is in flight; then freeze further addbufs. */
1112 spin_lock(&dev->buf_lock);
1113 if (atomic_read(&dev->buf_alloc)) {
1114 spin_unlock(&dev->buf_lock);
1117 ++dev->buf_use; /* Can't allocate more after this call */
1118 spin_unlock(&dev->buf_lock);
/* First pass: count how many orders actually hold buffers. */
1120 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1121 if (dma->bufs[i].buf_count)
1125 DRM_DEBUG("count = %d\n", count);
/* Second pass: copy a descriptor per populated order to userspace. */
1127 if (request->count >= count) {
1128 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1129 if (dma->bufs[i].buf_count) {
1130 struct drm_buf_desc __user *to =
1131 &request->list[count];
1132 struct drm_buf_entry *from = &dma->bufs[i];
1133 if (copy_to_user(&to->count,
1135 sizeof(from->buf_count)) ||
1136 copy_to_user(&to->size,
1138 sizeof(from->buf_size)) ||
1139 copy_to_user(&to->low_mark,
1141 sizeof(from->low_mark)) ||
1142 copy_to_user(&to->high_mark,
1144 sizeof(from->high_mark)))
1147 DRM_DEBUG("%d %d %d %d %d\n",
1149 dma->bufs[i].buf_count,
1150 dma->bufs[i].buf_size,
1151 dma->bufs[i].low_mark,
1152 dma->bufs[i].high_mark);
1157 request->count = count;
1163 * Specifies a low and high water mark for buffer allocation
1165 * \param inode device inode.
1166 * \param file_priv DRM file private.
1167 * \param cmd command.
1168 * \param arg a pointer to a drm_buf_desc structure.
1169 * \return zero on success or a negative number on failure.
1171 * Verifies that the size order is bounded between the admissible orders and
1172 * updates the respective drm_device_dma::bufs entry low and high water mark.
1174 * \note This ioctl is deprecated and mostly never used.
/*
 * NOTE(review): interior lines elided in this capture; code byte-identical.
 */
1176 int drm_legacy_markbufs(struct drm_device *dev, void *data,
1177 struct drm_file *file_priv)
1179 struct drm_device_dma *dma = dev->dma;
1180 struct drm_buf_desc *request = data;
1182 struct drm_buf_entry *entry;
1184 if (drm_core_check_feature(dev, DRIVER_MODESET))
1187 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1193 DRM_DEBUG("%d, %d, %d\n",
1194 request->size, request->low_mark, request->high_mark);
1195 order = order_base_2(request->size);
1196 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1198 entry = &dma->bufs[order];
/* Marks must lie within [0, buf_count] for this order. */
1200 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1202 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1205 entry->low_mark = request->low_mark;
1206 entry->high_mark = request->high_mark;
1212 * Unreserve the buffers in list, previously reserved using drmDMA.
1214 * \param inode device inode.
1215 * \param file_priv DRM file private.
1216 * \param cmd command.
1217 * \param arg pointer to a drm_buf_free structure.
1218 * \return zero on success or a negative number on failure.
1220 * Calls free_buffer() for each used buffer.
1221 * This function is primarily used for debugging.
/*
 * NOTE(review): interior lines elided in this capture; code byte-identical.
 */
1223 int drm_legacy_freebufs(struct drm_device *dev, void *data,
1224 struct drm_file *file_priv)
1226 struct drm_device_dma *dma = dev->dma;
1227 struct drm_buf_free *request = data;
1230 struct drm_buf *buf;
1233 DRM_DEBUG("%d\n", request->count);
/* For each user-supplied index: validate range and ownership, then free. */
1235 for (i = 0; i < request->count; i++) {
1236 if (copy_from_user(&idx, &request->list[i], sizeof(idx))) {
1240 if (idx < 0 || idx >= dma->buf_count) {
1241 DRM_ERROR("Index %d (of %d max)\n",
1242 idx, dma->buf_count - 1);
1246 buf = dma->buflist[idx];
1247 if (buf->file_priv != file_priv) {
1248 DRM_ERROR("Process %d freeing buffer not owned\n",
1253 drm_legacy_free_buffer(dev, buf);
1260 * Maps all of the DMA buffers into client-virtual space (ioctl).
1262 * \param inode device inode.
1263 * \param file_priv DRM file private.
1264 * \param cmd command.
1265 * \param arg pointer to a drm_buf_map structure.
1266 * \return zero on success or a negative number on failure.
1268 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1269 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1270 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
/*
 * NOTE(review): interior lines elided in this capture; code byte-identical.
 * Uses DragonFly BSD vmspace/vm_mmap primitives rather than Linux mmap.
 */
1273 int drm_legacy_mapbufs(struct drm_device *dev, void *data,
1274 struct drm_file *file_priv)
1276 struct drm_device_dma *dma = dev->dma;
1279 vm_offset_t address;
1280 struct vmspace *vms;
1284 struct drm_buf_map *request = data;
1287 vms = DRM_CURPROC->td_proc->p_vmspace;
1289 if (drm_core_check_feature(dev, DRIVER_MODESET))
1292 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
/* Refuse while an allocation is in flight; then freeze further addbufs. */
1298 spin_lock(&dev->buf_lock);
1299 if (atomic_read(&dev->buf_alloc)) {
1300 spin_unlock(&dev->buf_lock);
1303 dev->buf_use++; /* Can't allocate more after this call */
1304 spin_unlock(&dev->buf_lock);
1306 if (request->count < dma->buf_count)
/* AGP/SG buffers map through agp_buffer_map; PCI maps the byte range. */
1309 if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
1310 (drm_core_check_feature(dev, DRIVER_SG) &&
1311 (dma->flags & _DRM_DMA_USE_SG))) {
1312 drm_local_map_t *map = dev->agp_buffer_map;
1318 size = round_page(map->size);
1319 foff = (unsigned long)map->handle;
1321 size = round_page(dma->byte_count),
1325 vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1326 retcode = -vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1327 VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
1328 SLIST_FIRST(&dev->devnode->si_hlist), foff);
1332 request->virtual = (void *)vaddr;
/* Describe each buffer (idx/total/used/address) back to userspace. */
1334 for (i = 0; i < dma->buf_count; i++) {
1335 if (copy_to_user(&request->list[i].idx,
1336 &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
1340 if (copy_to_user(&request->list[i].total,
1341 &dma->buflist[i]->total, sizeof(request->list[0].total))) {
1345 if (copy_to_user(&request->list[i].used, &zero,
1350 address = vaddr + dma->buflist[i]->offset; /* *** */
1351 if (copy_to_user(&request->list[i].address, &address,
1358 request->count = dma->buf_count;
1359 DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
/*
 * drm_legacy_dma_ioctl() - forward the DMA ioctl to the driver's handler;
 * refused on pure-KMS drivers.
 * NOTE(review): trailing lines elided in this capture; code byte-identical.
 */
1364 int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
1365 struct drm_file *file_priv)
1367 if (drm_core_check_feature(dev, DRIVER_MODESET))
1370 if (dev->driver->dma_ioctl)
1371 return dev->driver->dma_ioctl(dev, data, file_priv);
/*
 * drm_legacy_getsarea() - find the shared-memory (SARea) map: the _DRM_SHM
 * entry on dev->maplist carrying _DRM_CONTAINS_LOCK.
 * NOTE(review): return statements elided in this capture; code byte-identical.
 */
1376 struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
1378 struct drm_map_list *entry;
1380 list_for_each_entry(entry, &dev->maplist, head) {
1381 if (entry->map && entry->map->type == _DRM_SHM &&
1382 (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1388 EXPORT_SYMBOL(drm_legacy_getsarea);