/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $FreeBSD: src/sys/dev/drm2/drm_bufs.c,v 1.1 2012/05/22 11:07:44 kib Exp $
 */

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include <sys/conf.h>
#include <bus/pci/pcireg.h>

#include <drm/drmP.h>

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	struct resource *res;
	int rid;

	DRM_LOCK_ASSERT(dev);

	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	if (dev->pcir[resource] != NULL) {
		return 0;
	}

	DRM_UNLOCK(dev);
	rid = PCIR_BAR(resource);
	res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
	    RF_SHAREABLE);
	DRM_LOCK(dev);
	if (res == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	if (dev->pcir[resource] == NULL) {
		dev->pcirid[resource] = rid;
		dev->pcir[resource] = res;
	}

	return 0;
}

unsigned long drm_get_resource_start(struct drm_device *dev,
				     unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
				   unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}
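
/*
 * Typical use from a driver's attach path (an illustrative sketch, not
 * lifted from any particular driver):
 *
 *	unsigned long base = drm_get_resource_start(dev, 0);
 *	unsigned long len  = drm_get_resource_len(dev, 0);
 *	if (len != 0)
 *		drm_addmap(dev, base, len, _DRM_REGISTERS, 0, &map);
 *
 * Both helpers return 0 when the underlying BAR cannot be allocated.
 */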

int drm_addmap(struct drm_device * dev, unsigned long offset,
	       unsigned long size,
    enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
	struct drm_local_map *map;
	struct drm_map_list *entry = NULL;
	int align;

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = kmalloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (!map) {
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;
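	/* Take a small unique id from the unr(9) allocator and shift it
	 * into the upper bits so it can double as the map's user-visible
	 * token; drm_rmmap() reverses the shift when releasing the id. */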
	map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) <<
	    DRM_MAP_HANDLE_SHIFT);

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		drm_free(map, DRM_MEM_MAPS);
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		drm_free(map, DRM_MEM_MAPS);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		drm_free(map, DRM_MEM_MAPS);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		list_for_each_entry(entry, &dev->maplist, head) {
			if (entry->map->type == type && (entry->map->offset == offset ||
			    (entry->map->type == _DRM_SHM &&
			    entry->map->flags == _DRM_CONTAINS_LOCK))) {
				entry->map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				/* Hand back the existing map instead of the
				 * newly allocated one, which never went
				 * through the type-specific setup below.
				 * Release its handle id the same way
				 * drm_rmmap() does. */
				DRM_UNLOCK(dev);
				free_unr(dev->map_unrhdr,
				    (unsigned long)map->handle >>
				    DRM_MAP_HANDLE_SHIFT);
				DRM_LOCK(dev);
				drm_free(map, DRM_MEM_MAPS);
				map = entry->map;
				goto done;
			}
		}
	}

	switch (map->type) {
	case _DRM_REGISTERS:
		map->virtual = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->virtual = kmalloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->virtual);
		if (!map->virtual) {
			drm_free(map, DRM_MEM_MAPS);
			return ENOMEM;
		}
		map->offset = (unsigned long)map->virtual;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK(dev);
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK(dev);
				drm_free(map->virtual, DRM_MEM_MAPS);
				drm_free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->virtual; /* Pointer to lock */
			DRM_UNLOCK(dev);
		}
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			drm_free(map, DRM_MEM_MAPS);
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, DRM_MEM_MAPS);
			return EINVAL;
		}
		map->virtual = (void *)(dev->sg->vaddr + offset);
		map->offset = dev->sg->vaddr + offset;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
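		/* For instance, a 0x8000-byte map keeps 0x8000 alignment,
		 * while a 0x3000-byte map (not a power of two) falls back
		 * to PAGE_SIZE. */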
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			drm_free(map, DRM_MEM_MAPS);
			return ENOMEM;
		}
		map->virtual = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		drm_free(map, DRM_MEM_MAPS);
		return EINVAL;
	}

	entry = kmalloc(sizeof(*entry), DRM_MEM_DRIVER, M_ZERO | M_NOWAIT);
	if (entry == NULL) {
		/* XXX the type-specific resources set up above are leaked
		 * on this error path. */
		drm_free(map, DRM_MEM_MAPS);
		return ENOMEM;
	}
	entry->map = map;
	list_add(&entry->head, &dev->maplist);

done:
	/* Jumped to, with the lock held, when an existing kernel map is
	 * found and handed back. */

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}

int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK(dev);
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK(dev);
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr   = map->mtrr;
	request->handle = (void *)map->handle;

	return 0;
}
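
/*
 * Userland reaches drm_addmap() through DRM_IOCTL_ADD_MAP.  A minimal
 * sketch of the calling side (fd is an open drm node; fb_base/fb_size are
 * illustrative values and error handling is omitted):
 *
 *	struct drm_map req = { .offset = fb_base, .size = fb_size,
 *	    .type = _DRM_FRAME_BUFFER, .flags = 0 };
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &req);
 *	// req.handle now holds the token to pass to mmap(2)
 */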

void drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	int found = 0;

	DRM_LOCK_ASSERT(dev);

	if (map == NULL)
		return;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			list_del(&r_list->head);
			drm_free(r_list, DRM_MEM_DRIVER);
			found = 1;
			break;
		}
	}

	if (!found)
		return;

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		drm_free(map->virtual, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->dev, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	DRM_UNLOCK(dev);
	if (map->handle)
		free_unr(dev->map_unrhdr, (unsigned long)map->handle >>
		    DRM_MAP_HANDLE_SHIFT);
	DRM_LOCK(dev);

	drm_free(map, DRM_MEM_MAPS);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or an errno on failure.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;

	DRM_LOCK(dev);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* Either the list is empty or we walked it to the end without a
	 * match; in both cases there is nothing to remove.
	 */
	if (list_empty(&dev->maplist) || !map) {
		DRM_UNLOCK(dev);
		return EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		DRM_UNLOCK(dev);
		return 0;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK(dev);

	return 0;
}

static void drm_cleanup_buf_error(struct drm_device *dev,
				  drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		drm_free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			drm_free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		drm_free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = kmalloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		drm_free(temp_pagelist, DRM_MEM_PAGES);
		drm_free(entry->seglist, DRM_MEM_SEGS);
		drm_free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		spin_unlock(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		spin_lock(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf          = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;
			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next    = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = kmalloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	drm_free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}
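
/*
 * The dispatcher above is reached via DRM_IOCTL_ADD_BUFS.  A minimal
 * sketch of the calling side (fd is an open drm node; the count and size
 * are illustrative):
 *
 *	struct drm_buf_desc req = { .count = 32, .size = 65536,
 *	    .flags = _DRM_AGP_BUFFER, .agp_start = 0 };
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 *	// on return, req.count and req.size reflect what was allocated
 */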

int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	spin_lock(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}

int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	spin_lock(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		spin_unlock(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark  = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	spin_unlock(&dev->dma_lock);

	return 0;
}

int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	spin_lock(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	spin_unlock(&dev->dma_lock);

	return retcode;
}

int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	spin_lock(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = (unsigned long)map->handle;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

 done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
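
/*
 * drm_mapbufs() backs DRM_IOCTL_MAP_BUFS: the whole buffer pool is mmapped
 * once, then one drm_buf_pub is filled in per buffer.  A minimal sketch of
 * the calling side (fd and count are illustrative):
 *
 *	struct drm_buf_pub list[count];
 *	struct drm_buf_map bm = { .count = count, .list = list };
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm);
 *	// bm.virtual is the pool mapping; list[i].address points at buffer i
 */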

/*
 * Compute order.  Can be made faster.
 */
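/*
 * For illustration, values that follow directly from the definition below:
 *
 *	drm_order(0)    == 0
 *	drm_order(4096) == 12	(exact power of two)
 *	drm_order(4097) == 13	(anything in between rounds up)
 */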
int drm_order(unsigned long size)
{
	int order;

	if (size == 0)
		return 0;

	order = flsl(size) - 1;
	if (size & ~(1ul << order))
		++order;

	return order;
}