/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include "bus/pci/pcireg.h"

#include "dev/drm/drmP.h"

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
        if (resource >= DRM_MAX_PCI_RESOURCE) {
                DRM_ERROR("Resource %d too large\n", resource);
                return 1;
        }

        DRM_UNLOCK();
        if (dev->pcir[resource] != NULL) {
                DRM_LOCK();
                return 0;
        }

        dev->pcirid[resource] = PCIR_BAR(resource);
        dev->pcir[resource] = bus_alloc_resource_any(dev->device,
            SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);
        DRM_LOCK();

        if (dev->pcir[resource] == NULL) {
                DRM_ERROR("Couldn't find resource 0x%x\n", resource);
                return 1;
        }

        return 0;
}

unsigned long drm_get_resource_start(struct drm_device *dev,
                                     unsigned int resource)
{
        if (drm_alloc_resource(dev, resource) != 0)
                return 0;

        return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
                                   unsigned int resource)
{
        if (drm_alloc_resource(dev, resource) != 0)
                return 0;

        return rman_get_size(dev->pcir[resource]);
}

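/* Add a mapping of the requested type to dev->maplist.  Kernel-allocated
 * maps (registers, frame buffer, SHM) that already exist are reused rather
 * than duplicated.  Called with the DRM lock held; the lock is dropped
 * around the allocation itself.  On success, stores the map in *map_ptr
 * and returns 0.
 */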
int drm_addmap(struct drm_device *dev, unsigned long offset,
               unsigned long size, enum drm_map_type type,
               enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
        drm_local_map_t *map;
        int align;
        /*drm_agp_mem_t *entry;
        int valid;*/

        /* Only allow shared memory to be removable since we only keep
         * enough bookkeeping information about shared memory to allow
         * for removal when processes fork.
         */
        if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
                DRM_ERROR("Requested removable map for non-DRM_SHM\n");
                return EINVAL;
        }
        if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
                DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
                    offset, size);
                return EINVAL;
        }
        if (offset + size < offset) {
                DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
                    offset, size);
                return EINVAL;
        }

        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
            size, type);

        /* Check if this is just another version of a kernel-allocated map, and
         * just hand that back if so.
         */
        if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
            type == _DRM_SHM) {
                TAILQ_FOREACH(map, &dev->maplist, link) {
                        if (map->type == type && (map->offset == offset ||
                            (map->type == _DRM_SHM &&
                            map->flags == _DRM_CONTAINS_LOCK))) {
                                map->size = size;
                                DRM_DEBUG("Found kernel map %d\n", type);
                                goto done;
                        }
                }
        }
        DRM_UNLOCK();

        /* Allocate a new map structure, fill it in, and do any type-specific
         * initialization necessary.
         */
        map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
        if (!map) {
                DRM_LOCK();
                return ENOMEM;
        }

        map->offset = offset;
        map->size = size;
        map->type = type;
        map->flags = flags;

        switch (map->type) {
        case _DRM_REGISTERS:
                map->handle = drm_ioremap(dev, map);
                if (!(map->flags & _DRM_WRITE_COMBINING))
                        break;
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
                        map->mtrr = 1;
                break;
        case _DRM_SHM:
                map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
                DRM_DEBUG("%lu %d %p\n",
                    map->size, drm_order(map->size), map->handle);
                if (!map->handle) {
                        free(map, DRM_MEM_MAPS);
                        DRM_LOCK();
                        return ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        DRM_LOCK();
                        if (dev->lock.hw_lock != NULL) {
                                DRM_UNLOCK();
                                free(map->handle, DRM_MEM_MAPS);
                                free(map, DRM_MEM_MAPS);
                                return EBUSY;
                        }
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                        DRM_UNLOCK();
                }
                break;
        case _DRM_AGP:
                /*valid = 0;*/
                /* In some cases (i810 driver), user space may have already
                 * added the AGP base itself, because dev->agp->base previously
                 * only got set during AGP enable.  So, only add the base
                 * address if the map's offset isn't already within the
                 * aperture.
                 */
                if (map->offset < dev->agp->base ||
                    map->offset > dev->agp->base +
                    dev->agp->info.ai_aperture_size - 1) {
                        map->offset += dev->agp->base;
                }
                map->mtrr   = dev->agp->mtrr; /* for getmap */
                /*for (entry = dev->agp->memory; entry; entry = entry->next) {
                        if ((map->offset >= entry->bound) &&
                            (map->offset + map->size <=
                            entry->bound + entry->pages * PAGE_SIZE)) {
                                valid = 1;
                                break;
                        }
                }
                if (!valid) {
                        free(map, DRM_MEM_MAPS);
                        DRM_LOCK();
                        return EACCES;
                }*/
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        free(map, DRM_MEM_MAPS);
                        DRM_LOCK();
                        return EINVAL;
                }
                map->offset += dev->sg->handle;
                break;
        case _DRM_CONSISTENT:
                /* Unfortunately, we don't get any alignment specification from
                 * the caller, so we have to guess.  drm_pci_alloc requires
                 * a power-of-two alignment, so try to align the bus address of
                 * the map to its size if possible, otherwise just assume
                 * PAGE_SIZE alignment.
                 */
                align = map->size;
                if ((align & (align - 1)) != 0)
                        align = PAGE_SIZE;
                map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
                if (map->dmah == NULL) {
                        free(map, DRM_MEM_MAPS);
                        DRM_LOCK();
                        return ENOMEM;
                }
                map->handle = map->dmah->vaddr;
                map->offset = map->dmah->busaddr;
                break;
        default:
                DRM_ERROR("Bad map type %d\n", map->type);
                free(map, DRM_MEM_MAPS);
                DRM_LOCK();
                return EINVAL;
        }

        DRM_LOCK();
        TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
        /* Jumped to, with lock held, when a kernel map is found. */

        DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
            map->size);

        *map_ptr = map;

        return 0;
}

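/* ioctl wrapper for drm_addmap: checks that the file was opened
 * read/write and that the caller is privileged (except for AGP maps),
 * performs the mapping, and copies the resulting map back to user space.
 */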
int drm_addmap_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_map *request = data;
        drm_local_map_t *map;
        int err;

        if (!(dev->flags & (FREAD|FWRITE)))
                return EACCES; /* Require read/write */

        if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
                return EACCES;

        DRM_LOCK();
        err = drm_addmap(dev, request->offset, request->size, request->type,
            request->flags, &map);
        DRM_UNLOCK();
        if (err != 0)
                return err;

        request->offset = map->offset;
        request->size = map->size;
        request->type = map->type;
        request->flags = map->flags;
        request->mtrr   = map->mtrr;
        request->handle = map->handle;

        if (request->type != _DRM_SHM) {
                request->handle = (void *)request->offset;
        }

        return 0;
}

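/* Remove a map from dev->maplist and release its backing resources
 * according to its type.  Must be called with dev->dev_lock held.
 */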
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
        DRM_SPINLOCK_ASSERT(&dev->dev_lock);

        TAILQ_REMOVE(&dev->maplist, map, link);

        switch (map->type) {
        case _DRM_REGISTERS:
                if (map->bsr == NULL)
                        drm_ioremapfree(map);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (map->mtrr) {
                        int __unused retcode;

                        retcode = drm_mtrr_del(0, map->offset, map->size,
                            DRM_MTRR_WC);
                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                }
                break;
        case _DRM_SHM:
                free(map->handle, DRM_MEM_MAPS);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                drm_pci_free(dev, map->dmah);
                break;
        default:
                DRM_ERROR("Bad map type %d\n", map->type);
                break;
        }

        if (map->bsr != NULL) {
                bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
                    map->bsr);
        }

        free(map, DRM_MEM_MAPS);
}

/* Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        drm_local_map_t *map;
        struct drm_map *request = data;

        DRM_LOCK();
        TAILQ_FOREACH(map, &dev->maplist, link) {
                if (map->handle == request->handle &&
                    map->flags & _DRM_REMOVABLE)
                        break;
        }

        /* No match found. */
        if (map == NULL) {
                DRM_UNLOCK();
                return EINVAL;
        }

        drm_rmmap(dev, map);

        DRM_UNLOCK();

        return 0;
}

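/* Unwind a partially completed buffer allocation: free any allocated DMA
 * segments and per-buffer private data recorded in the entry, then reset
 * its counts.  Used on the error paths of the addbufs implementations.
 */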
static void drm_cleanup_buf_error(struct drm_device *dev,
                                  drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        drm_pci_free(dev, entry->seglist[i]);
                }
                free(entry->seglist, DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
                }
                free(entry->buflist, DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

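/* Allocate DMA buffers backed by AGP memory at request->agp_start and
 * append them to dma->buflist.  Expects dev->dma_lock to be held by the
 * drm_addbufs_agp wrapper.
 */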
static int drm_do_addbufs_agp(struct drm_device *dev,
                              struct drm_buf_desc *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        /*drm_agp_mem_t *agp_entry;
        int valid*/
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG("count:      %d\n",  count);
        DRM_DEBUG("order:      %d\n",  order);
        DRM_DEBUG("size:       %d\n",  size);
        DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n",  alignment);
        DRM_DEBUG("page_order: %d\n",  page_order);
        DRM_DEBUG("total:      %d\n",  total);

        /* Make sure buffers are located in AGP memory that we own */
        /* Breaks MGA due to drm_alloc_agp not setting up entries for the
         * memory.  Safe to ignore for now because these ioctls are still
         * root-only.
         */
        /*valid = 0;
        for (agp_entry = dev->agp->memory; agp_entry;
            agp_entry = agp_entry->next) {
                if ((agp_offset >= agp_entry->bound) &&
                    (agp_offset + total * count <=
                    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
                        valid = 1;
                        break;
                }
        }
        if (!valid) {
                DRM_DEBUG("zone invalid\n");
                return EINVAL;
        }*/

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
            M_NOWAIT | M_ZERO);
        if (!entry->buflist) {
                return ENOMEM;
        }

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver->buf_priv_size;
                buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
                    M_NOWAIT | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return ENOMEM;
                }

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
            DRM_MEM_BUFS, M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        return 0;
}

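/* Allocate DMA buffers from contiguous system memory via drm_pci_alloc
 * and append the new buffers and their pages to the device's lists.
 * Expects dev->dma_lock to be held; the lock is dropped around each
 * drm_pci_alloc call.
 */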
static int drm_do_addbufs_pci(struct drm_device *dev,
                              struct drm_buf_desc *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
            request->count, request->size, size, order);

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
            M_NOWAIT | M_ZERO);
        entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
            M_NOWAIT | M_ZERO);

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = malloc((dma->page_count + (count << page_order)) *
            sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

        if (entry->buflist == NULL || entry->seglist == NULL ||
            temp_pagelist == NULL) {
                free(temp_pagelist, DRM_MEM_PAGES);
                free(entry->seglist, DRM_MEM_SEGS);
                free(entry->buflist, DRM_MEM_BUFS);
                return ENOMEM;
        }

        memcpy(temp_pagelist, dma->pagelist, dma->page_count *
            sizeof(*dma->pagelist));

        DRM_DEBUG("pagelist: %d entries\n",
            dma->page_count + (count << page_order));

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while (entry->buf_count < count) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
                    0xfffffffful);
                DRM_SPINLOCK(&dev->dma_lock);
                if (dmah == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        free(temp_pagelist, DRM_MEM_PAGES);
                        return ENOMEM;
                }

                entry->seglist[entry->seg_count++] = dmah;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ %p\n",
                            dma->page_count + page_count,
                            (char *)dmah->vaddr + PAGE_SIZE * i);
                        temp_pagelist[dma->page_count + page_count++] =
                            (long)dmah->vaddr + PAGE_SIZE * i;
                }
                for (offset = 0;
                    offset + size <= total && entry->buf_count < count;
                    offset += alignment, ++entry->buf_count) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = ((char *)dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next    = NULL;
                        buf->pending = 0;
                        buf->file_priv = NULL;

                        buf->dev_priv_size = dev->driver->buf_priv_size;
                        buf->dev_private = malloc(buf->dev_priv_size,
                            DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
                        if (buf->dev_private == NULL) {
                                /* Set count correctly so we free the proper
                                 * amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                free(temp_pagelist, DRM_MEM_PAGES);
                                return ENOMEM;
                        }

                        DRM_DEBUG("buffer %d @ %p\n",
                            entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
            DRM_MEM_BUFS, M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                free(temp_pagelist, DRM_MEM_PAGES);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        free(dma->pagelist, DRM_MEM_PAGES);
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        request->count = entry->buf_count;
        request->size = size;

        return 0;
}

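/* Allocate DMA buffers out of the scatter-gather area at
 * request->agp_start and append them to dma->buflist.  Expects
 * dev->dma_lock to be held by the drm_addbufs_sg wrapper.
 */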
static int drm_do_addbufs_sg(struct drm_device *dev,
                             struct drm_buf_desc *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n",  count);
        DRM_DEBUG("order:      %d\n",  order);
        DRM_DEBUG("size:       %d\n",  size);
        DRM_DEBUG("agp_offset: %ld\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n",  alignment);
        DRM_DEBUG("page_order: %d\n",  page_order);
        DRM_DEBUG("total:      %d\n",  total);

        entry = &dma->bufs[order];

        entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
            M_NOWAIT | M_ZERO);
        if (entry->buflist == NULL)
                return ENOMEM;

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver->buf_priv_size;
                buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
                    M_NOWAIT | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return ENOMEM;
                }

                DRM_DEBUG("buffer %d @ %p\n",
                    entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = realloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
            DRM_MEM_BUFS, M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        return 0;
}

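/* The drm_addbufs_{agp,sg,pci} wrappers below validate the request
 * (count, order, and for SG and PCI, superuser privilege), take
 * dev->dma_lock, refuse new allocations once a buffer-using ioctl has run
 * or once an allocation exists for the requested order, and then call the
 * matching drm_do_addbufs_* implementation.
 */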
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
        int order, ret;

        if (request->count < 0 || request->count > 4096)
                return EINVAL;

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return EINVAL;

        DRM_SPINLOCK(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_agp(dev, request);

        DRM_SPINUNLOCK(&dev->dma_lock);

        return ret;
}

int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
        int order, ret;

        if (!DRM_SUSER(DRM_CURPROC))
                return EACCES;

        if (request->count < 0 || request->count > 4096)
                return EINVAL;

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return EINVAL;

        DRM_SPINLOCK(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_sg(dev, request);

        DRM_SPINUNLOCK(&dev->dma_lock);

        return ret;
}

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
        int order, ret;

        if (!DRM_SUSER(DRM_CURPROC))
                return EACCES;

        if (request->count < 0 || request->count > 4096)
                return EINVAL;

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return EINVAL;

        DRM_SPINLOCK(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_pci(dev, request);

        DRM_SPINUNLOCK(&dev->dma_lock);

        return ret;
}

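/* ioctl entry point: dispatch an addbufs request to the AGP, SG, or PCI
 * implementation based on request->flags.
 */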
int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_buf_desc *request = data;
        int err;

        if (request->flags & _DRM_AGP_BUFFER)
                err = drm_addbufs_agp(dev, request);
        else if (request->flags & _DRM_SG_BUFFER)
                err = drm_addbufs_sg(dev, request);
        else
                err = drm_addbufs_pci(dev, request);

        return err;
}

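/* Report the count, size, and freelist watermarks of each allocated
 * buffer pool to user space.  Also bumps dev->buf_use, which prevents any
 * further addbufs allocations.
 */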
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        struct drm_buf_info *request = data;
        int i;
        int count;
        int retcode = 0;

        DRM_SPINLOCK(&dev->dma_lock);
        ++dev->buf_use;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                if (dma->bufs[i].buf_count)
                        ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (request->count >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                        if (dma->bufs[i].buf_count) {
                                struct drm_buf_desc from;

                                from.count = dma->bufs[i].buf_count;
                                from.size = dma->bufs[i].buf_size;
                                from.low_mark = dma->bufs[i].freelist.low_mark;
                                from.high_mark = dma->bufs[i].freelist.high_mark;

                                if (DRM_COPY_TO_USER(&request->list[count], &from,
                                    sizeof(struct drm_buf_desc)) != 0) {
                                        retcode = EFAULT;
                                        break;
                                }

                                DRM_DEBUG("%d %d %d %d %d\n",
                                    i, dma->bufs[i].buf_count,
                                    dma->bufs[i].buf_size,
                                    dma->bufs[i].freelist.low_mark,
                                    dma->bufs[i].freelist.high_mark);
                                ++count;
                        }
                }
        }
        request->count = count;

        return retcode;
}

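/* Set the freelist low and high watermarks for the buffer pool matching
 * the requested size, after validating the order and the marks.
 */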
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        struct drm_buf_desc *request = data;
        int order;

        DRM_DEBUG("%d, %d, %d\n",
                  request->size, request->low_mark, request->high_mark);

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
            request->low_mark < 0 || request->high_mark < 0) {
                return EINVAL;
        }

        DRM_SPINLOCK(&dev->dma_lock);
        if (request->low_mark > dma->bufs[order].buf_count ||
            request->high_mark > dma->bufs[order].buf_count) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return EINVAL;
        }

        dma->bufs[order].freelist.low_mark  = request->low_mark;
        dma->bufs[order].freelist.high_mark = request->high_mark;
        DRM_SPINUNLOCK(&dev->dma_lock);

        return 0;
}

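/* Release a list of buffers supplied by user space.  Each index is
 * validated and must refer to a buffer owned by the calling file.
 */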
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        struct drm_buf_free *request = data;
        int i;
        int idx;
        drm_buf_t *buf;
        int retcode = 0;

        DRM_DEBUG("%d\n", request->count);

        DRM_SPINLOCK(&dev->dma_lock);
        for (i = 0; i < request->count; i++) {
                if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
                        retcode = EFAULT;
                        break;
                }
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                            idx, dma->buf_count - 1);
                        retcode = EINVAL;
                        break;
                }
                buf = dma->buflist[idx];
                if (buf->file_priv != file_priv) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                            DRM_CURRENTPID);
                        retcode = EINVAL;
                        break;
                }
                drm_free_buffer(dev, buf);
        }
        DRM_SPINUNLOCK(&dev->dma_lock);

        return retcode;
}

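/* Map the DMA buffer area into the calling process and copy each
 * buffer's index, size, and user address out to the request list.  AGP
 * and SG buffers map dev->agp_buffer_map; PCI buffers map the first
 * dma->byte_count bytes of the device node.
 */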
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        vm_offset_t address;
        struct vmspace *vms;
        vm_ooffset_t foff;
        vm_size_t size;
        vm_offset_t vaddr;
        struct drm_buf_map *request = data;
        int i;

        vms = DRM_CURPROC->td_proc->p_vmspace;

        DRM_SPINLOCK(&dev->dma_lock);
        dev->buf_use++;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        if (request->count < dma->buf_count)
                goto done;

        if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
            (drm_core_check_feature(dev, DRIVER_SG) &&
            (dma->flags & _DRM_DMA_USE_SG))) {
                drm_local_map_t *map = dev->agp_buffer_map;

                if (map == NULL) {
                        retcode = EINVAL;
                        goto done;
                }
                size = round_page(map->size);
                foff = map->offset;
        } else {
                size = round_page(dma->byte_count);
                foff = 0;
        }

        vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
            dev->devnode, foff);
#else
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
            SLIST_FIRST(&dev->devnode->si_hlist), foff);
#endif
        if (retcode)
                goto done;

        request->virtual = (void *)vaddr;

        for (i = 0; i < dma->buf_count; i++) {
                if (DRM_COPY_TO_USER(&request->list[i].idx,
                    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request->list[i].total,
                    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
                    sizeof(zero))) {
                        retcode = EFAULT;
                        goto done;
                }
                address = vaddr + dma->buflist[i]->offset; /* *** */
                if (DRM_COPY_TO_USER(&request->list[i].address, &address,
                    sizeof(address))) {
                        retcode = EFAULT;
                        goto done;
                }
        }

 done:
        request->count = dma->buf_count;

        DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

        return retcode;
}

/*
 * Compute the smallest order n such that (1 << n) >= size.  Can be made
 * faster.
 */
int drm_order(unsigned long size)
{
        int order;

        if (size == 0)
                return 0;

        order = flsl(size) - 1;
        if (size & ~(1ul << order))
                ++order;

        return order;
}