2 * Legacy: Generic DRM Buffer Management
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
9 * Author: Gareth Hughes <gareth@valinux.com>
11 * Permission is hereby granted, free of charge, to any person obtaining a
12 * copy of this software and associated documentation files (the "Software"),
13 * to deal in the Software without restriction, including without limitation
14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 * and/or sell copies of the Software, and to permit persons to whom the
16 * Software is furnished to do so, subject to the following conditions:
18 * The above copyright notice and this permission notice (including the next
19 * paragraph) shall be included in all copies or substantial portions of the
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
32 #include <bus/pci/pcireg.h>
33 #include <linux/types.h>
34 #include <linux/export.h>
36 #include "drm_legacy.h"
/*
 * Allocate and register a drm_local_map describing a mappable region
 * (registers, frame buffer, SHM, AGP, scatter/gather, or consistent DMA
 * memory) and link it onto dev->maplist.  For kernel-allocated map types
 * an existing matching entry is reused instead of creating a duplicate.
 *
 * NOTE(review): this copy of the file appears to be missing source lines
 * (returns, braces, switch-case labels); the comments below describe only
 * what is visible — verify control flow against upstream drm_bufs.c.
 */
38 int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
39 unsigned int size, enum drm_map_type type,
40 enum drm_map_flags flags, struct drm_local_map **map_ptr)
42 struct drm_local_map *map;
43 struct drm_map_list *entry = NULL;
44 drm_dma_handle_t *dmah;
46 /* Allocate a new map structure, fill it in, and do any type-specific
47 * initialization necessary.
49 map = kmalloc(sizeof(*map), M_DRM, M_ZERO | M_WAITOK | M_NULLOK);
59 /* Only allow shared memory to be removable since we only keep enough
60 * book keeping information about shared memory to allow for removal
61 * when processes fork.
63 if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
64 DRM_ERROR("Requested removable map for non-DRM_SHM\n");
/* Both offset and size must be page aligned for a mappable region. */
68 if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
69 DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
70 (uintmax_t)offset, size);
/* Reject a range whose end wraps past the top of the address space. */
74 if (offset + size < offset) {
75 DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
76 (uintmax_t)offset, size);
81 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
82 (unsigned long long)map->offset, map->size, map->type);
84 /* Check if this is just another version of a kernel-allocated map, and
85 * just hand that back if so.
87 if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
89 list_for_each_entry(entry, &dev->maplist, head) {
90 if (entry->map->type == type && (entry->map->offset == offset ||
91 (entry->map->type == _DRM_SHM &&
92 entry->map->flags == _DRM_CONTAINS_LOCK))) {
93 entry->map->size = size;
94 DRM_DEBUG("Found kernel map %d\n", type);
/* Type-specific setup; presumably a switch (map->type) — the case
 * labels for _DRM_REGISTERS and _DRM_SHM are not visible here. */
102 map->handle = drm_ioremap(dev, map);
103 if (!(map->flags & _DRM_WRITE_COMBINING))
106 case _DRM_FRAME_BUFFER:
107 if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
/* _DRM_SHM: back the map with ordinary kernel memory. */
111 map->handle = kmalloc(map->size, M_DRM, M_WAITOK | M_NULLOK);
112 DRM_DEBUG("%lu %d %p\n",
113 map->size, order_base_2(map->size), map->handle);
115 drm_free(map, M_DRM);
118 map->offset = (unsigned long)map->handle;
119 if (map->flags & _DRM_CONTAINS_LOCK) {
120 /* Prevent a 2nd X Server from creating a 2nd lock */
122 if (dev->lock.hw_lock != NULL) {
124 drm_free(map->handle, M_DRM);
125 drm_free(map, M_DRM);
128 dev->lock.hw_lock = map->handle; /* Pointer to lock */
134 /* In some cases (i810 driver), user space may have already
135 * added the AGP base itself, because dev->agp->base previously
136 * only got set during AGP enable. So, only add the base
137 * address if the map's offset isn't already within the
140 if (map->offset < dev->agp->base ||
141 map->offset > dev->agp->base +
142 dev->agp->agp_info.ai_aperture_size - 1) {
143 map->offset += dev->agp->base;
145 map->mtrr = dev->agp->agp_mtrr; /* for getmap */
146 /*for (entry = dev->agp->memory; entry; entry = entry->next) {
147 if ((map->offset >= entry->bound) &&
148 (map->offset + map->size <=
149 entry->bound + entry->pages * PAGE_SIZE)) {
155 drm_free(map, M_DRM);
/* _DRM_SCATTER_GATHER: the map points into the previously allocated
 * SG area; requires dev->sg to exist (failure path frees map). */
159 case _DRM_SCATTER_GATHER:
161 drm_free(map, M_DRM);
164 map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
165 map->offset = dev->sg->vaddr + offset;
167 case _DRM_CONSISTENT:
168 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
169 * As we're limiting the address to 2^32-1 (or less),
170 * casting it down to 32 bits is no problem, but we
171 * need to point to a 64bit variable first. */
172 dmah = drm_pci_alloc(dev, map->size, map->size);
177 map->handle = dmah->vaddr;
178 map->offset = dmah->busaddr;
181 DRM_ERROR("Bad map type %d\n", map->type);
182 drm_free(map, M_DRM);
/* Link the new list entry onto the device-wide map list. */
186 list_add(&entry->head, &dev->maplist);
189 /* Jumped to, with lock held, when a kernel map is found. */
191 DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
200 * Ioctl to specify a range of memory that is available for mapping by a
203 * \param inode device inode.
204 * \param file_priv DRM file private.
205 * \param cmd command.
206 * \param arg pointer to a drm_map structure.
207 * \return zero on success or a negative value on error.
/*
 * Ioctl entry point for DRM_IOCTL_ADD_MAP: validates caller permissions,
 * delegates to drm_legacy_addmap(), and copies the resulting map
 * parameters back into the user-supplied request structure.
 */
210 int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
211 struct drm_file *file_priv)
213 struct drm_map *request = data;
214 drm_local_map_t *map;
/* The file descriptor must have been opened for both read and write. */
217 if (!(dev->flags & (FREAD|FWRITE)))
218 return -EACCES; /* Require read/write */
/* Non-admin callers are restricted; only _DRM_AGP requests bypass the
 * CAP_SYS_ADMIN check (the rejection return is not visible here). */
220 if (!capable(CAP_SYS_ADMIN) && request->type != _DRM_AGP)
224 err = drm_legacy_addmap(dev, request->offset, request->size, request->type,
225 request->flags, &map);
/* Report the (possibly adjusted) map parameters back to user space. */
230 request->offset = map->offset;
231 request->size = map->size;
232 request->type = map->type;
233 request->flags = map->flags;
234 request->mtrr = map->mtrr;
235 request->handle = (void *)map->handle;
241 * Remove a map private from list and deallocate resources if the mapping
244 * Searches the map on drm_device::maplist, removes it from the list, see if
245 * it's being used, and free any associated resources (such as MTRR's) if it's not
248 * \sa drm_legacy_addmap
/*
 * Remove @map from dev->maplist and release its type-specific backing
 * resources (ioremap, MTRR, kernel memory, or consistent DMA memory).
 * Caller holds dev->struct_mutex — see drm_legacy_rmmap() below.
 */
250 int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
252 struct drm_map_list *r_list = NULL, *list_t;
253 drm_dma_handle_t dmah;
256 /* Find the list entry for the map and remove it */
257 list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
258 if (r_list->map == map) {
259 list_del(&r_list->head);
/* Type-specific teardown; case labels are partially missing from this
 * copy — _DRM_REGISTERS presumably precedes the ioremapfree call. */
271 drm_ioremapfree(map);
273 case _DRM_FRAME_BUFFER:
275 int __unused retcode;
277 retcode = drm_mtrr_del(0, map->offset, map->size,
279 DRM_DEBUG("mtrr_del = %d\n", retcode);
/* _DRM_SHM maps are backed by plain kernel memory. */
283 drm_free(map->handle, M_DRM);
286 case _DRM_SCATTER_GATHER:
288 case _DRM_CONSISTENT:
/* Rebuild a drm_dma_handle_t from the map so the PCI DMA memory can
 * be returned through the common free path. */
289 dmah.vaddr = map->handle;
290 dmah.busaddr = map->offset;
291 dmah.size = map->size;
292 __drm_legacy_pci_free(dev, &dmah);
/*
 * Locked wrapper around drm_legacy_rmmap_locked(): acquires
 * dev->struct_mutex for the duration of the removal.
 */
300 int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
304 mutex_lock(&dev->struct_mutex);
305 ret = drm_legacy_rmmap_locked(dev, map);
306 mutex_unlock(&dev->struct_mutex);
310 EXPORT_SYMBOL(drm_legacy_rmmap);
312 /* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
313 * the last close of the device, and this is necessary for cleanup when things
314 * exit uncleanly. Therefore, having userland manually remove mappings seems
315 * like a pointless exercise since they're going away anyway.
317 * One use case might be after addmap is allowed for normal users for SHM and
318 * gets used by drivers that the server doesn't need to care about. This seems
321 * \param inode device inode.
322 * \param file_priv DRM file private.
323 * \param cmd command.
324 * \param arg pointer to a struct drm_map structure.
325 * \return zero on success or a negative value on error.
/*
 * Ioctl entry point for DRM_IOCTL_RM_MAP: looks up the map by its
 * user_token, refuses to remove permanent (register/framebuffer) maps,
 * and otherwise delegates to drm_legacy_rmmap().
 */
327 int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
328 struct drm_file *file_priv)
330 struct drm_map *request = data;
331 struct drm_local_map *map = NULL;
332 struct drm_map_list *r_list;
/* Match on the user-space handle; only _DRM_REMOVABLE maps qualify. */
335 list_for_each_entry(r_list, &dev->maplist, head) {
337 r_list->user_token == (unsigned long)request->handle &&
338 r_list->map->flags & _DRM_REMOVABLE) {
344 /* List has wrapped around to the head pointer, or its empty we didn't
347 if (list_empty(&dev->maplist) || !map) {
352 /* Register and framebuffer maps are permanent */
353 if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
358 drm_legacy_rmmap(dev, map);
366 * Cleanup after an error on one of the addbufs() functions.
368 * \param dev DRM device.
369 * \param entry buffer entry where the error occurred.
371 * Frees any pages and buffers associated with the given entry.
/*
 * Undo a partially-completed addbufs: free every allocated DMA segment
 * and every per-buffer dev_private block for @entry, release the seglist
 * and buflist arrays, and reset the counts to zero so the entry is
 * reusable.  Callers set buf_count/seg_count before calling so exactly
 * the allocated amount is freed.
 */
373 static void drm_cleanup_buf_error(struct drm_device * dev,
374 struct drm_buf_entry * entry)
378 if (entry->seg_count) {
379 for (i = 0; i < entry->seg_count; i++) {
380 drm_pci_free(dev, entry->seglist[i]);
382 drm_free(entry->seglist, M_DRM);
384 entry->seg_count = 0;
387 if (entry->buf_count) {
388 for (i = 0; i < entry->buf_count; i++) {
389 drm_free(entry->buflist[i].dev_private, M_DRM);
391 drm_free(entry->buflist, M_DRM);
393 entry->buf_count = 0;
/*
 * Create request->count DMA buffers of size order_base_2(request->size)
 * located inside the AGP aperture (offset request->agp_start from
 * dev->agp->base).  Buffers are recorded in dma->bufs[order] and appended
 * to the device-wide dma->buflist.  Caller holds dev->dma_lock (see
 * drm_legacy_addbufs_agp()).
 */
397 static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
399 drm_device_dma_t *dma = dev->dma;
400 drm_buf_entry_t *entry;
401 /*drm_agp_mem_t *agp_entry;
404 unsigned long offset;
405 unsigned long agp_offset;
414 drm_buf_t **temp_buflist;
416 count = request->count;
417 order = order_base_2(request->size);
/* Honor _DRM_PAGE_ALIGN by rounding each buffer up to a page. */
420 alignment = (request->flags & _DRM_PAGE_ALIGN)
421 ? round_page(size) : size;
422 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
423 total = PAGE_SIZE << page_order;
426 agp_offset = dev->agp->base + request->agp_start;
428 DRM_DEBUG("count: %d\n", count);
429 DRM_DEBUG("order: %d\n", order);
430 DRM_DEBUG("size: %d\n", size);
431 DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
432 DRM_DEBUG("alignment: %d\n", alignment);
433 DRM_DEBUG("page_order: %d\n", page_order);
434 DRM_DEBUG("total: %d\n", total);
436 /* Make sure buffers are located in AGP memory that we own */
437 /* Breaks MGA due to drm_alloc_agp not setting up entries for the
438 * memory. Safe to ignore for now because these ioctls are still
442 for (agp_entry = dev->agp->memory; agp_entry;
443 agp_entry = agp_entry->next) {
444 if ((agp_offset >= agp_entry->bound) &&
445 (agp_offset + total * count <=
446 agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
452 DRM_DEBUG("zone invalid\n");
456 entry = &dma->bufs[order];
458 entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
459 M_WAITOK | M_NULLOK | M_ZERO);
460 if (!entry->buflist) {
464 entry->buf_size = size;
465 entry->page_order = page_order;
/* Fill in one drm_buf_t per buffer; addresses are offsets into the
 * AGP aperture, so no per-buffer allocation besides dev_private. */
469 while (entry->buf_count < count) {
470 buf = &entry->buflist[entry->buf_count];
471 buf->idx = dma->buf_count + entry->buf_count;
472 buf->total = alignment;
476 buf->offset = (dma->byte_count + offset);
477 buf->bus_address = agp_offset + offset;
478 buf->address = (void *)(agp_offset + offset);
481 buf->file_priv = NULL;
483 buf->dev_priv_size = dev->driver->dev_priv_size;
484 buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
485 M_WAITOK | M_NULLOK | M_ZERO);
486 if (buf->dev_private == NULL) {
487 /* Set count correctly so we free the proper amount. */
488 entry->buf_count = count;
489 drm_cleanup_buf_error(dev, entry);
495 byte_count += PAGE_SIZE << page_order;
498 DRM_DEBUG("byte_count: %d\n", byte_count);
/* Grow the device-wide buffer list to hold the new entries. */
500 temp_buflist = krealloc(dma->buflist,
501 (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
502 M_DRM, M_WAITOK | M_NULLOK);
503 if (temp_buflist == NULL) {
504 /* Free the entry because it isn't valid */
505 drm_cleanup_buf_error(dev, entry);
508 dma->buflist = temp_buflist;
510 for (i = 0; i < entry->buf_count; i++) {
511 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
514 dma->buf_count += entry->buf_count;
515 dma->byte_count += byte_count;
517 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
518 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
/* Report the actual allocation back to the caller. */
520 request->count = entry->buf_count;
521 request->size = size;
523 dma->flags = _DRM_DMA_USE_AGP;
/*
 * Create request->count DMA buffers backed by consistent PCI memory.
 * Allocates one DMA segment per (PAGE_SIZE << page_order) chunk, carves
 * buffers of 'alignment' bytes out of each segment, and merges the new
 * pages into dma->pagelist only once every allocation has succeeded.
 * Caller holds dev->dma_lock; the lock is dropped around drm_pci_alloc()
 * since that allocation may sleep.
 */
528 static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
530 drm_device_dma_t *dma = dev->dma;
536 drm_buf_entry_t *entry;
537 drm_dma_handle_t *dmah;
540 unsigned long offset;
544 unsigned long *temp_pagelist;
545 drm_buf_t **temp_buflist;
547 count = request->count;
548 order = order_base_2(request->size);
551 DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
552 request->count, request->size, size, order);
554 alignment = (request->flags & _DRM_PAGE_ALIGN)
555 ? round_page(size) : size;
556 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
557 total = PAGE_SIZE << page_order;
559 entry = &dma->bufs[order];
561 entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
562 M_WAITOK | M_NULLOK | M_ZERO);
563 entry->seglist = kmalloc(count * sizeof(*entry->seglist), M_DRM,
564 M_WAITOK | M_NULLOK | M_ZERO);
566 /* Keep the original pagelist until we know all the allocations
569 temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
570 sizeof(*dma->pagelist),
571 M_DRM, M_WAITOK | M_NULLOK);
/* Any allocation failure: unwind everything allocated so far. */
573 if (entry->buflist == NULL || entry->seglist == NULL ||
574 temp_pagelist == NULL) {
575 drm_free(temp_pagelist, M_DRM);
576 drm_free(entry->seglist, M_DRM);
577 drm_free(entry->buflist, M_DRM);
581 memcpy(temp_pagelist, dma->pagelist, dma->page_count *
582 sizeof(*dma->pagelist));
584 DRM_DEBUG("pagelist: %d entries\n",
585 dma->page_count + (count << page_order));
587 entry->buf_size = size;
588 entry->page_order = page_order;
592 while (entry->buf_count < count) {
/* drm_pci_alloc() may block, so drop the spinlock around it. */
593 spin_unlock(&dev->dma_lock);
594 dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
595 spin_lock(&dev->dma_lock);
598 /* Set count correctly so we free the proper amount. */
599 entry->buf_count = count;
600 entry->seg_count = count;
601 drm_cleanup_buf_error(dev, entry);
602 drm_free(temp_pagelist, M_DRM);
/* Record the new segment and append its pages to the staging list. */
606 entry->seglist[entry->seg_count++] = dmah;
607 for (i = 0; i < (1 << page_order); i++) {
608 DRM_DEBUG("page %d @ 0x%08lx\n",
609 dma->page_count + page_count,
610 (unsigned long)dmah->vaddr + PAGE_SIZE * i);
611 temp_pagelist[dma->page_count + page_count++]
612 = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
/* Carve buffers out of this segment at 'alignment' strides. */
615 offset + size <= total && entry->buf_count < count;
616 offset += alignment, ++entry->buf_count) {
617 buf = &entry->buflist[entry->buf_count];
618 buf->idx = dma->buf_count + entry->buf_count;
619 buf->total = alignment;
622 buf->offset = (dma->byte_count + byte_count + offset);
623 buf->address = ((char *)dmah->vaddr + offset);
624 buf->bus_address = dmah->busaddr + offset;
627 buf->file_priv = NULL;
629 buf->dev_priv_size = dev->driver->dev_priv_size;
630 buf->dev_private = kmalloc(buf->dev_priv_size,
632 M_WAITOK | M_NULLOK |
634 if (buf->dev_private == NULL) {
635 /* Set count correctly so we free the proper amount. */
636 entry->buf_count = count;
637 entry->seg_count = count;
638 drm_cleanup_buf_error(dev, entry);
639 drm_free(temp_pagelist, M_DRM);
643 DRM_DEBUG("buffer %d @ %p\n",
644 entry->buf_count, buf->address);
646 byte_count += PAGE_SIZE << page_order;
649 temp_buflist = krealloc(dma->buflist,
650 (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
651 M_DRM, M_WAITOK | M_NULLOK);
652 if (temp_buflist == NULL) {
653 /* Free the entry because it isn't valid */
654 drm_cleanup_buf_error(dev, entry);
655 drm_free(temp_pagelist, M_DRM);
658 dma->buflist = temp_buflist;
660 for (i = 0; i < entry->buf_count; i++) {
661 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
664 /* No allocations failed, so now we can replace the original pagelist
667 drm_free(dma->pagelist, M_DRM);
668 dma->pagelist = temp_pagelist;
670 dma->buf_count += entry->buf_count;
671 dma->seg_count += entry->seg_count;
672 dma->page_count += entry->seg_count << page_order;
673 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
675 request->count = entry->buf_count;
676 request->size = size;
/*
 * Create request->count DMA buffers inside the previously-allocated
 * scatter/gather area (offset request->agp_start from dev->sg->vaddr).
 * Structure mirrors drm_do_addbufs_agp(); no per-buffer backing memory
 * is allocated besides dev_private.  Caller holds dev->dma_lock.
 */
682 static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
684 drm_device_dma_t *dma = dev->dma;
685 drm_buf_entry_t *entry;
687 unsigned long offset;
688 unsigned long agp_offset;
697 drm_buf_t **temp_buflist;
699 count = request->count;
700 order = order_base_2(request->size);
703 alignment = (request->flags & _DRM_PAGE_ALIGN)
704 ? round_page(size) : size;
705 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
706 total = PAGE_SIZE << page_order;
/* Despite the name, this is an offset into the SG area, not AGP. */
709 agp_offset = request->agp_start;
711 DRM_DEBUG("count: %d\n", count);
712 DRM_DEBUG("order: %d\n", order);
713 DRM_DEBUG("size: %d\n", size);
714 DRM_DEBUG("agp_offset: %ld\n", agp_offset);
715 DRM_DEBUG("alignment: %d\n", alignment);
716 DRM_DEBUG("page_order: %d\n", page_order);
717 DRM_DEBUG("total: %d\n", total);
719 entry = &dma->bufs[order];
721 entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
722 M_WAITOK | M_NULLOK | M_ZERO);
723 if (entry->buflist == NULL)
726 entry->buf_size = size;
727 entry->page_order = page_order;
731 while (entry->buf_count < count) {
732 buf = &entry->buflist[entry->buf_count];
733 buf->idx = dma->buf_count + entry->buf_count;
734 buf->total = alignment;
738 buf->offset = (dma->byte_count + offset);
739 buf->bus_address = agp_offset + offset;
/* Kernel-virtual address is relative to the SG area's base mapping. */
740 buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
743 buf->file_priv = NULL;
745 buf->dev_priv_size = dev->driver->dev_priv_size;
746 buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
747 M_WAITOK | M_NULLOK | M_ZERO);
748 if (buf->dev_private == NULL) {
749 /* Set count correctly so we free the proper amount. */
750 entry->buf_count = count;
751 drm_cleanup_buf_error(dev, entry);
755 DRM_DEBUG("buffer %d @ %p\n",
756 entry->buf_count, buf->address);
760 byte_count += PAGE_SIZE << page_order;
763 DRM_DEBUG("byte_count: %d\n", byte_count);
765 temp_buflist = krealloc(dma->buflist,
766 (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
767 M_DRM, M_WAITOK | M_NULLOK);
768 if (temp_buflist == NULL) {
769 /* Free the entry because it isn't valid */
770 drm_cleanup_buf_error(dev, entry);
773 dma->buflist = temp_buflist;
775 for (i = 0; i < entry->buf_count; i++) {
776 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
779 dma->buf_count += entry->buf_count;
780 dma->byte_count += byte_count;
782 DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
783 DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
785 request->count = entry->buf_count;
786 request->size = size;
788 dma->flags = _DRM_DMA_USE_SG;
794 * Add AGP buffers for DMA transfers.
796 * \param dev struct drm_device to which the buffers are to be added.
797 * \param request pointer to a struct drm_buf_desc describing the request.
798 * \return zero on success or a negative number on failure.
800 * After some sanity checks creates a drm_buf structure for each buffer and
801 * reallocates the buffer list of the same size order to accommodate the new
/*
 * Validated, locked front-end for drm_do_addbufs_agp(): bounds-checks the
 * request, then under dev->dma_lock refuses allocation once any
 * buffer-using ioctl has run (dev->buf_use) or if this size order already
 * has buffers, and otherwise performs the allocation.
 */
804 int drm_legacy_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
808 if (request->count < 0 || request->count > 4096)
811 order = order_base_2(request->size);
812 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
815 spin_lock(&dev->dma_lock);
817 /* No more allocations after first buffer-using ioctl. */
818 if (dev->buf_use != 0) {
819 spin_unlock(&dev->dma_lock);
822 /* No more than one allocation per order */
823 if (dev->dma->bufs[order].buf_count != 0) {
824 spin_unlock(&dev->dma_lock);
828 ret = drm_do_addbufs_agp(dev, request);
830 spin_unlock(&dev->dma_lock);
/*
 * Validated, locked front-end for drm_do_addbufs_sg().  Same checks as
 * the AGP variant, plus SG allocation requires CAP_SYS_ADMIN.
 */
835 static int drm_legacy_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
839 if (!capable(CAP_SYS_ADMIN))
842 if (request->count < 0 || request->count > 4096)
845 order = order_base_2(request->size);
846 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
849 spin_lock(&dev->dma_lock);
851 /* No more allocations after first buffer-using ioctl. */
852 if (dev->buf_use != 0) {
853 spin_unlock(&dev->dma_lock);
856 /* No more than one allocation per order */
857 if (dev->dma->bufs[order].buf_count != 0) {
858 spin_unlock(&dev->dma_lock);
862 ret = drm_do_addbufs_sg(dev, request);
864 spin_unlock(&dev->dma_lock);
/*
 * Validated, locked front-end for drm_do_addbufs_pci().  Same checks as
 * the SG variant, including the CAP_SYS_ADMIN requirement.
 */
869 int drm_legacy_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
873 if (!capable(CAP_SYS_ADMIN))
876 if (request->count < 0 || request->count > 4096)
879 order = order_base_2(request->size);
880 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
883 spin_lock(&dev->dma_lock);
885 /* No more allocations after first buffer-using ioctl. */
886 if (dev->buf_use != 0) {
887 spin_unlock(&dev->dma_lock);
890 /* No more than one allocation per order */
891 if (dev->dma->bufs[order].buf_count != 0) {
892 spin_unlock(&dev->dma_lock);
896 ret = drm_do_addbufs_pci(dev, request);
898 spin_unlock(&dev->dma_lock);
904 * Add buffers for DMA transfers (ioctl).
906 * \param inode device inode.
907 * \param file_priv DRM file private.
908 * \param cmd command.
909 * \param arg pointer to a struct drm_buf_desc request.
910 * \return zero on success or a negative number on failure.
912 * According with the memory type specified in drm_buf_desc::flags and the
913 * build options, it dispatches the call either to addbufs_agp(),
914 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
915 * PCI memory respectively.
/*
 * Ioctl entry point for DRM_IOCTL_ADD_BUFS: dispatches on the request
 * flags to the AGP, scatter/gather, or (default) PCI allocator.
 */
917 int drm_legacy_addbufs(struct drm_device *dev, void *data,
918 struct drm_file *file_priv)
920 struct drm_buf_desc *request = data;
923 if (request->flags & _DRM_AGP_BUFFER)
924 err = drm_legacy_addbufs_agp(dev, request);
925 else if (request->flags & _DRM_SG_BUFFER)
926 err = drm_legacy_addbufs_sg(dev, request);
928 err = drm_legacy_addbufs_pci(dev, request);
934 * Get information about the buffer mappings.
936 * This was originally meant for debugging purposes, or by a sophisticated
937 * client library to determine how best to use the available buffers (e.g.,
938 * large buffers can be used for image transfer).
940 * \param inode device inode.
941 * \param file_priv DRM file private.
942 * \param cmd command.
943 * \param arg pointer to a drm_buf_info structure.
944 * \return zero on success or a negative number on failure.
946 * Increments drm_device::buf_use while holding the drm_device::buf_lock
947 * lock, preventing of allocating more buffers after this call. Information
948 * about each requested buffer is then copied into user space.
/*
 * Ioctl entry point for DRM_IOCTL_INFO_BUFS: marks the device as
 * buffer-using (freezing further addbufs), counts the non-empty size
 * orders, and — if the user-supplied array is large enough — copies one
 * drm_buf_desc per non-empty order out to user space.  Legacy-only:
 * rejected for KMS drivers and drivers without DMA support.
 */
950 int drm_legacy_infobufs(struct drm_device *dev, void *data,
951 struct drm_file *file_priv)
953 struct drm_device_dma *dma = dev->dma;
954 struct drm_buf_info *request = data;
958 if (drm_core_check_feature(dev, DRIVER_MODESET))
961 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
/* Bail out if an allocation is currently in flight. */
967 spin_lock(&dev->buf_lock);
968 if (atomic_read(&dev->buf_alloc)) {
969 spin_unlock(&dev->buf_lock);
972 ++dev->buf_use; /* Can't allocate more after this call */
973 spin_unlock(&dev->buf_lock);
/* First pass: count the orders that actually have buffers. */
975 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
976 if (dma->bufs[i].buf_count)
980 DRM_DEBUG("count = %d\n", count);
/* Second pass: copy per-order info out, one field at a time. */
982 if (request->count >= count) {
983 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
984 if (dma->bufs[i].buf_count) {
985 struct drm_buf_desc __user *to =
986 &request->list[count];
987 struct drm_buf_entry *from = &dma->bufs[i];
988 if (copy_to_user(&to->count,
990 sizeof(from->buf_count)) ||
991 copy_to_user(&to->size,
993 sizeof(from->buf_size)) ||
994 copy_to_user(&to->low_mark,
996 sizeof(from->low_mark)) ||
997 copy_to_user(&to->high_mark,
999 sizeof(from->high_mark)))
1002 DRM_DEBUG("%d %d %d %d %d\n",
1004 dma->bufs[i].buf_count,
1005 dma->bufs[i].buf_size,
1006 dma->bufs[i].low_mark,
1007 dma->bufs[i].high_mark);
/* Always report how many orders were (or would be) listed. */
1012 request->count = count;
1018 * Specifies a low and high water mark for buffer allocation
1020 * \param inode device inode.
1021 * \param file_priv DRM file private.
1022 * \param cmd command.
1023 * \param arg a pointer to a drm_buf_desc structure.
1024 * \return zero on success or a negative number on failure.
1026 * Verifies that the size order is bounded between the admissible orders and
1027 * updates the respective drm_device_dma::bufs entry low and high water mark.
1029 * \note This ioctl is deprecated and mostly never used.
/*
 * Ioctl entry point for DRM_IOCTL_MARK_BUFS: sets the low/high water
 * marks for the buffer entry of the requested size order, after checking
 * that the order is valid and both marks lie within [0, buf_count].
 * Legacy-only; deprecated and rarely used.
 */
1031 int drm_legacy_markbufs(struct drm_device *dev, void *data,
1032 struct drm_file *file_priv)
1034 struct drm_device_dma *dma = dev->dma;
1035 struct drm_buf_desc *request = data;
1037 struct drm_buf_entry *entry;
1039 if (drm_core_check_feature(dev, DRIVER_MODESET))
1042 if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1048 DRM_DEBUG("%d, %d, %d\n",
1049 request->size, request->low_mark, request->high_mark);
1050 order = order_base_2(request->size);
1051 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1053 entry = &dma->bufs[order];
1055 if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1057 if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1060 entry->low_mark = request->low_mark;
1061 entry->high_mark = request->high_mark;
1067 * Unreserve the buffers in list, previously reserved using drmDMA.
1069 * \param inode device inode.
1070 * \param file_priv DRM file private.
1071 * \param cmd command.
1072 * \param arg pointer to a drm_buf_free structure.
1073 * \return zero on success or a negative number on failure.
1075 * Calls free_buffer() for each used buffer.
1076 * This function is primarily used for debugging.
/*
 * Ioctl entry point for DRM_IOCTL_FREE_BUFS: releases each buffer whose
 * index appears in the user-supplied list, after validating the index
 * range and that the buffer is owned by the calling file.  Runs under
 * dev->dma_lock.
 */
1078 int drm_legacy_freebufs(struct drm_device *dev, void *data,
1079 struct drm_file *file_priv)
1081 drm_device_dma_t *dma = dev->dma;
1082 struct drm_buf_free *request = data;
1088 DRM_DEBUG("%d\n", request->count);
1090 spin_lock(&dev->dma_lock);
1091 for (i = 0; i < request->count; i++) {
1092 if (copy_from_user(&idx, &request->list[i], sizeof(idx))) {
1096 if (idx < 0 || idx >= dma->buf_count) {
1097 DRM_ERROR("Index %d (of %d max)\n",
1098 idx, dma->buf_count - 1);
/* Only the owner of a buffer may free it. */
1102 buf = dma->buflist[idx];
1103 if (buf->file_priv != file_priv) {
1104 DRM_ERROR("Process %d freeing buffer not owned\n",
1109 drm_legacy_free_buffer(dev, buf);
1111 spin_unlock(&dev->dma_lock);
1117 * Maps all of the DMA buffers into client-virtual space (ioctl).
1119 * \param inode device inode.
1120 * \param file_priv DRM file private.
1121 * \param cmd command.
1122 * \param arg pointer to a drm_buf_map structure.
1123 * \return zero on success or a negative number on failure.
1125 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1126 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1127 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
/*
 * Ioctl entry point for DRM_IOCTL_MAP_BUFS: maps the whole DMA buffer
 * region (AGP/SG buffer map, or the PCI byte range) into the calling
 * process with vm_mmap(), then copies each buffer's idx/total/used/
 * address back to user space.  Also freezes further addbufs via
 * dev->buf_use.
 */
1130 int drm_legacy_mapbufs(struct drm_device *dev, void *data,
1131 struct drm_file *file_priv)
1133 drm_device_dma_t *dma = dev->dma;
1136 vm_offset_t address;
1137 struct vmspace *vms;
1141 struct drm_buf_map *request = data;
1144 vms = DRM_CURPROC->td_proc->p_vmspace;
1146 spin_lock(&dev->dma_lock);
1147 dev->buf_use++; /* Can't allocate more after this call */
1148 spin_unlock(&dev->dma_lock);
/* The user array must have room for every buffer. */
1150 if (request->count < dma->buf_count)
/* AGP/SG buffers map dev->agp_buffer_map; PCI maps the raw byte count
 * (the else branch is partially missing from this copy). */
1153 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
1154 (drm_core_check_feature(dev, DRIVER_SG) &&
1155 (dma->flags & _DRM_DMA_USE_SG))) {
1156 drm_local_map_t *map = dev->agp_buffer_map;
1162 size = round_page(map->size);
1163 foff = (unsigned long)map->handle;
1165 size = round_page(dma->byte_count),
/* Place the mapping above the process data segment. */
1169 vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1170 retcode = -vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1171 VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
1172 SLIST_FIRST(&dev->devnode->si_hlist), foff);
1176 request->virtual = (void *)vaddr;
1178 for (i = 0; i < dma->buf_count; i++) {
1179 if (copy_to_user(&request->list[i].idx,
1180 &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
1184 if (copy_to_user(&request->list[i].total,
1185 &dma->buflist[i]->total, sizeof(request->list[0].total))) {
1189 if (copy_to_user(&request->list[i].used, &zero,
/* Per-buffer user address = base mapping + buffer offset. */
1194 address = vaddr + dma->buflist[i]->offset; /* *** */
1195 if (copy_to_user(&request->list[i].address, &address,
1202 request->count = dma->buf_count;
1203 DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
/*
 * Ioctl entry point for DRM_IOCTL_DMA: rejected for KMS drivers;
 * otherwise forwarded to the driver's dma_ioctl hook if one is set.
 */
1208 int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
1209 struct drm_file *file_priv)
1211 if (drm_core_check_feature(dev, DRIVER_MODESET))
1214 if (dev->driver->dma_ioctl)
1215 return dev->driver->dma_ioctl(dev, data, file_priv);
/*
 * Locate the SAREA on dev->maplist: the shared-memory (_DRM_SHM) map
 * flagged _DRM_CONTAINS_LOCK that carries the hardware lock.  The return
 * statements are not visible in this copy; presumably returns the match
 * or NULL.
 */
1220 struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
1222 struct drm_map_list *entry;
1224 list_for_each_entry(entry, &dev->maplist, head) {
1225 if (entry->map && entry->map->type == _DRM_SHM &&
1226 (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1232 EXPORT_SYMBOL(drm_legacy_getsarea);