/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $FreeBSD: src/sys/dev/drm2/drm_bufs.c,v 1.1 2012/05/22 11:07:44 kib Exp $
 */

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include <sys/conf.h>
#include <bus/pci/pcireg.h>

#include <drm/drmP.h>

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
        struct resource *res;
        int rid;

        DRM_LOCK_ASSERT(dev);

        if (resource >= DRM_MAX_PCI_RESOURCE) {
                DRM_ERROR("Resource %d too large\n", resource);
                return 1;
        }

        if (dev->pcir[resource] != NULL) {
                return 0;
        }

        DRM_UNLOCK(dev);
        rid = PCIR_BAR(resource);
        res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid,
            RF_SHAREABLE);
        DRM_LOCK(dev);
        if (res == NULL) {
                DRM_ERROR("Couldn't find resource 0x%x\n", resource);
                return 1;
        }

        if (dev->pcir[resource] == NULL) {
                dev->pcirid[resource] = rid;
                dev->pcir[resource] = res;
        }

        return 0;
}

unsigned long drm_get_resource_start(struct drm_device *dev,
                                     unsigned int resource)
{
        if (drm_alloc_resource(dev, resource) != 0)
                return 0;

        return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
                                   unsigned int resource)
{
        if (drm_alloc_resource(dev, resource) != 0)
                return 0;

        return rman_get_size(dev->pcir[resource]);
}
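
/*
 * Usage sketch (editor's illustration only, not called from this file): with
 * DRM_LOCK(dev) held, a driver attach path could look up a register BAR and
 * map it roughly like this; the BAR index and flags are placeholders.
 *
 *	unsigned long base = drm_get_resource_start(dev, 0);
 *	unsigned long len  = drm_get_resource_len(dev, 0);
 *	drm_local_map_t *map;
 *
 *	if (len != 0 &&
 *	    drm_addmap(dev, base, len, _DRM_REGISTERS, 0, &map) == 0)
 *		registers are then reachable through map->virtual
 */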

int drm_addmap(struct drm_device * dev, unsigned long offset,
               unsigned long size,
    enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
        struct drm_local_map *map;
        struct drm_map_list *entry;
        int align;
        /*drm_agp_mem_t *entry;
        int valid;*/

        /* Only allow shared memory to be removable since we only keep enough
         * bookkeeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
                DRM_ERROR("Requested removable map for non-DRM_SHM\n");
                return EINVAL;
        }
        if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
                DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
                    offset, size);
                return EINVAL;
        }
        if (offset + size < offset) {
                DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
                    offset, size);
                return EINVAL;
        }

        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
            size, type);

        /* Check if this is just another version of a kernel-allocated map, and
         * just hand that back if so.
         */
        if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
            type == _DRM_SHM) {
                list_for_each_entry(entry, &dev->maplist, head) {
                        if (entry->map->type == type && (entry->map->offset == offset ||
                            (entry->map->type == _DRM_SHM &&
                            entry->map->flags == _DRM_CONTAINS_LOCK))) {
                                entry->map->size = size;
                                map = entry->map;
                                DRM_DEBUG("Found kernel map %d\n", type);
                                goto done;
                        }
                }
        }
        DRM_UNLOCK(dev);

        /* Allocate a new map structure, fill it in, and do any type-specific
         * initialization necessary.
         */
        map = kmalloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
        if (!map) {
                DRM_LOCK(dev);
                return ENOMEM;
        }

        map->offset = offset;
        map->size = size;
        map->type = type;
        map->flags = flags;
        map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) <<
            DRM_MAP_HANDLE_SHIFT);

        switch (map->type) {
        case _DRM_REGISTERS:
                map->virtual = drm_ioremap(dev, map);
                if (!(map->flags & _DRM_WRITE_COMBINING))
                        break;
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
                        map->mtrr = 1;
                break;
        case _DRM_SHM:
                map->virtual = kmalloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
                DRM_DEBUG("%lu %d %p\n",
                    map->size, drm_order(map->size), map->virtual);
                if (!map->virtual) {
                        drm_free(map, DRM_MEM_MAPS);
                        DRM_LOCK(dev);
                        return ENOMEM;
                }
                map->offset = (unsigned long)map->virtual;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        DRM_LOCK(dev);
                        if (dev->lock.hw_lock != NULL) {
                                DRM_UNLOCK(dev);
                                drm_free(map->virtual, DRM_MEM_MAPS);
                                drm_free(map, DRM_MEM_MAPS);
                                return EBUSY;
                        }
                        dev->lock.hw_lock = map->virtual; /* Pointer to lock */
                        DRM_UNLOCK(dev);
                }
                break;
        case _DRM_AGP:
                /*valid = 0;*/
                /* In some cases (i810 driver), user space may have already
                 * added the AGP base itself, because dev->agp->base previously
                 * only got set during AGP enable.  So, only add the base
                 * address if the map's offset isn't already within the
                 * aperture.
                 */
                if (map->offset < dev->agp->base ||
                    map->offset > dev->agp->base +
                    dev->agp->info.ai_aperture_size - 1) {
                        map->offset += dev->agp->base;
                }
                map->mtrr   = dev->agp->mtrr; /* for getmap */
                /*for (entry = dev->agp->memory; entry; entry = entry->next) {
                        if ((map->offset >= entry->bound) &&
                            (map->offset + map->size <=
                            entry->bound + entry->pages * PAGE_SIZE)) {
                                valid = 1;
                                break;
                        }
                }
                if (!valid) {
                        drm_free(map, DRM_MEM_MAPS);
                        DRM_LOCK(dev);
                        return EACCES;
                }*/
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, DRM_MEM_MAPS);
                        DRM_LOCK(dev);
                        return EINVAL;
                }
                map->virtual = (void *)(dev->sg->vaddr + offset);
                map->offset = dev->sg->vaddr + offset;
                break;
        case _DRM_CONSISTENT:
                /* Unfortunately, we don't get any alignment specification from
                 * the caller, so we have to guess.  drm_pci_alloc requires
                 * a power-of-two alignment, so try to align the bus address of
                 * the map to its size if possible, otherwise just assume
                 * PAGE_SIZE alignment.
                 */
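                /* Illustration (editor's note): a 64 KB map is a power of
                 * two and keeps 64 KB alignment, while a 96 KB map fails
                 * the power-of-two test below and falls back to PAGE_SIZE.
                 */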
                align = map->size;
                if ((align & (align - 1)) != 0)
                        align = PAGE_SIZE;
                map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
                if (map->dmah == NULL) {
                        drm_free(map, DRM_MEM_MAPS);
                        DRM_LOCK(dev);
                        return ENOMEM;
                }
                map->virtual = map->dmah->vaddr;
                map->offset = map->dmah->busaddr;
                break;
        default:
                DRM_ERROR("Bad map type %d\n", map->type);
                drm_free(map, DRM_MEM_MAPS);
                DRM_LOCK(dev);
                return EINVAL;
        }

        DRM_LOCK(dev);

        /* Wrap the new map in a maplist entry and link it into the list. */
        entry = kmalloc(sizeof(*entry), DRM_MEM_DRIVER, M_ZERO | M_NOWAIT);
        if (entry == NULL) {
                drm_free(map, DRM_MEM_MAPS);
                return ENOMEM;
        }
        entry->map = map;
        entry->user_token = (unsigned long)map->handle;
        list_add(&entry->head, &dev->maplist);

done:
        /* Jumped to, with lock held, when a kernel map is found. */

        DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
            map->size);

        *map_ptr = map;

        return 0;
}

int drm_addmap_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_map *request = data;
        drm_local_map_t *map;
        int err;

        if (!(dev->flags & (FREAD|FWRITE)))
                return EACCES; /* Require read/write */

        if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
                return EACCES;

        DRM_LOCK(dev);
        err = drm_addmap(dev, request->offset, request->size, request->type,
            request->flags, &map);
        DRM_UNLOCK(dev);
        if (err != 0)
                return err;

        request->offset = map->offset;
        request->size = map->size;
        request->type = map->type;
        request->flags = map->flags;
        request->mtrr   = map->mtrr;
        request->handle = (void *)map->handle;

        return 0;
}

void drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
        struct drm_map_list *r_list = NULL, *list_t;
        int found = 0;

        DRM_LOCK_ASSERT(dev);

        if (map == NULL)
                return;

        /* Find the list entry for the map and remove it */
        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
                if (r_list->map == map) {
                        list_del(&r_list->head);
                        drm_free(r_list, DRM_MEM_DRIVER);
                        found = 1;
                        break;
                }
        }

        if (!found)
                return;

        switch (map->type) {
        case _DRM_REGISTERS:
                if (map->bsr == NULL)
                        drm_ioremapfree(map);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (map->mtrr) {
                        int __unused retcode;

                        retcode = drm_mtrr_del(0, map->offset, map->size,
                            DRM_MTRR_WC);
                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                }
                break;
        case _DRM_SHM:
                drm_free(map->virtual, DRM_MEM_MAPS);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                drm_pci_free(dev, map->dmah);
                break;
        default:
                DRM_ERROR("Bad map type %d\n", map->type);
                break;
        }

        if (map->bsr != NULL) {
                bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
                    map->bsr);
        }

        DRM_UNLOCK(dev);
        if (map->handle)
                free_unr(dev->map_unrhdr, (unsigned long)map->handle >>
                    DRM_MAP_HANDLE_SHIFT);
        DRM_LOCK(dev);

        drm_free(map, DRM_MEM_MAPS);
}

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param dev DRM device.
 * \param data pointer to a struct drm_map structure.
 * \param file_priv DRM file private.
 * \return zero on success or an errno on failure.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_map *request = data;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;

        DRM_LOCK(dev);
        list_for_each_entry(r_list, &dev->maplist, head) {
                if (r_list->map &&
                    r_list->user_token == (unsigned long)request->handle &&
                    r_list->map->flags & _DRM_REMOVABLE) {
                        map = r_list->map;
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (list_empty(&dev->maplist) || !map) {
                DRM_UNLOCK(dev);
                return EINVAL;
        }

        /* Register and framebuffer maps are permanent */
        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
                DRM_UNLOCK(dev);
                return 0;
        }

        drm_rmmap(dev, map);

        DRM_UNLOCK(dev);

        return 0;
}


static void drm_cleanup_buf_error(struct drm_device *dev,
                                  drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        drm_pci_free(dev, entry->seglist[i]);
                }
                drm_free(entry->seglist, DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        drm_free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
                }
                drm_free(entry->buflist, DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        /*drm_agp_mem_t *agp_entry;
        int valid*/
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;
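        /* Illustration (editor's note, assuming 4 KB pages, PAGE_SHIFT 12):
         * a request->size of 16 KB gives order 14, page_order 2 and total
         * 16 KB; any size up to PAGE_SIZE gives page_order 0 and total
         * PAGE_SIZE.  The same computation is repeated in the PCI and SG
         * variants below.
         */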

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG("count:      %d\n",  count);
        DRM_DEBUG("order:      %d\n",  order);
        DRM_DEBUG("size:       %d\n",  size);
        DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n",  alignment);
        DRM_DEBUG("page_order: %d\n",  page_order);
        DRM_DEBUG("total:      %d\n",  total);

        /* Make sure buffers are located in AGP memory that we own */
        /* Breaks MGA due to drm_alloc_agp not setting up entries for the
         * memory.  Safe to ignore for now because these ioctls are still
         * root-only.
         */
        /*valid = 0;
        for (agp_entry = dev->agp->memory; agp_entry;
            agp_entry = agp_entry->next) {
                if ((agp_offset >= agp_entry->bound) &&
                    (agp_offset + total * count <=
                    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
                        valid = 1;
                        break;
                }
        }
        if (!valid) {
                DRM_DEBUG("zone invalid\n");
                return EINVAL;
        }*/

        entry = &dma->bufs[order];

        entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
            M_NOWAIT | M_ZERO);
        if (!entry->buflist) {
                return ENOMEM;
        }

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver->buf_priv_size;
                buf->dev_private = kmalloc(buf->dev_priv_size, DRM_MEM_BUFS,
                    M_NOWAIT | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return ENOMEM;
                }

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = krealloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
            DRM_MEM_BUFS, M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        return 0;
}

static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
            request->count, request->size, size, order);

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        entry = &dma->bufs[order];

        entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
            M_NOWAIT | M_ZERO);
        entry->seglist = kmalloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
            M_NOWAIT | M_ZERO);

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
            sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

        if (entry->buflist == NULL || entry->seglist == NULL ||
            temp_pagelist == NULL) {
                drm_free(temp_pagelist, DRM_MEM_PAGES);
                drm_free(entry->seglist, DRM_MEM_SEGS);
                drm_free(entry->buflist, DRM_MEM_BUFS);
                return ENOMEM;
        }

        memcpy(temp_pagelist, dma->pagelist, dma->page_count *
            sizeof(*dma->pagelist));

        DRM_DEBUG("pagelist: %d entries\n",
            dma->page_count + (count << page_order));

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while (entry->buf_count < count) {
                spin_unlock(&dev->dma_lock);
                drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
                    0xfffffffful);
                spin_lock(&dev->dma_lock);
                if (dmah == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free(temp_pagelist, DRM_MEM_PAGES);
                        return ENOMEM;
                }

                entry->seglist[entry->seg_count++] = dmah;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ %p\n",
                            dma->page_count + page_count,
                            (char *)dmah->vaddr + PAGE_SIZE * i);
                        temp_pagelist[dma->page_count + page_count++] =
                            (long)dmah->vaddr + PAGE_SIZE * i;
                }
                for (offset = 0;
                    offset + size <= total && entry->buf_count < count;
                    offset += alignment, ++entry->buf_count) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = ((char *)dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next    = NULL;
                        buf->pending = 0;
                        buf->file_priv = NULL;

                        buf->dev_priv_size = dev->driver->buf_priv_size;
                        buf->dev_private = kmalloc(buf->dev_priv_size,
                            DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
                        if (buf->dev_private == NULL) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                drm_free(temp_pagelist, DRM_MEM_PAGES);
                                return ENOMEM;
                        }

                        DRM_DEBUG("buffer %d @ %p\n",
                            entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = krealloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
            DRM_MEM_BUFS, M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                drm_free(temp_pagelist, DRM_MEM_PAGES);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        drm_free(dma->pagelist, DRM_MEM_PAGES);
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        request->count = entry->buf_count;
        request->size = size;

        return 0;

}

static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
            ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n",  count);
        DRM_DEBUG("order:      %d\n",  order);
        DRM_DEBUG("size:       %d\n",  size);
        DRM_DEBUG("agp_offset: %ld\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n",  alignment);
        DRM_DEBUG("page_order: %d\n",  page_order);
        DRM_DEBUG("total:      %d\n",  total);

        entry = &dma->bufs[order];

        entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
            M_NOWAIT | M_ZERO);
        if (entry->buflist == NULL)
                return ENOMEM;

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
                buf->next    = NULL;
                buf->pending = 0;
                buf->file_priv = NULL;

                buf->dev_priv_size = dev->driver->buf_priv_size;
                buf->dev_private = kmalloc(buf->dev_priv_size, DRM_MEM_BUFS,
                    M_NOWAIT | M_ZERO);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return ENOMEM;
                }

                DRM_DEBUG("buffer %d @ %p\n",
                    entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = krealloc(dma->buflist,
            (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
            DRM_MEM_BUFS, M_NOWAIT);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        return 0;
}

int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
        int order, ret;

        if (request->count < 0 || request->count > 4096)
                return EINVAL;

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return EINVAL;

        spin_lock(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                spin_unlock(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                spin_unlock(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_agp(dev, request);

        spin_unlock(&dev->dma_lock);

        return ret;
}

int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
        int order, ret;

        if (!DRM_SUSER(DRM_CURPROC))
                return EACCES;

        if (request->count < 0 || request->count > 4096)
                return EINVAL;

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return EINVAL;

        spin_lock(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                spin_unlock(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                spin_unlock(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_sg(dev, request);

        spin_unlock(&dev->dma_lock);

        return ret;
}

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
        int order, ret;

        if (!DRM_SUSER(DRM_CURPROC))
                return EACCES;

        if (request->count < 0 || request->count > 4096)
                return EINVAL;

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return EINVAL;

        spin_lock(&dev->dma_lock);

        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                spin_unlock(&dev->dma_lock);
                return EBUSY;
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                spin_unlock(&dev->dma_lock);
                return ENOMEM;
        }

        ret = drm_do_addbufs_pci(dev, request);

        spin_unlock(&dev->dma_lock);

        return ret;
}

int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_buf_desc *request = data;
        int err;

        if (request->flags & _DRM_AGP_BUFFER)
                err = drm_addbufs_agp(dev, request);
        else if (request->flags & _DRM_SG_BUFFER)
                err = drm_addbufs_sg(dev, request);
        else
                err = drm_addbufs_pci(dev, request);

        return err;
}

int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        struct drm_buf_info *request = data;
        int i;
        int count;
        int retcode = 0;

        spin_lock(&dev->dma_lock);
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->dma_lock);

        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                if (dma->bufs[i].buf_count)
                        ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (request->count >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                        if (dma->bufs[i].buf_count) {
                                struct drm_buf_desc from;

                                from.count = dma->bufs[i].buf_count;
                                from.size = dma->bufs[i].buf_size;
                                from.low_mark = dma->bufs[i].freelist.low_mark;
                                from.high_mark = dma->bufs[i].freelist.high_mark;

                                if (DRM_COPY_TO_USER(&request->list[count], &from,
                                    sizeof(struct drm_buf_desc)) != 0) {
                                        retcode = EFAULT;
                                        break;
                                }

                                DRM_DEBUG("%d %d %d %d %d\n",
                                    i, dma->bufs[i].buf_count,
                                    dma->bufs[i].buf_size,
                                    dma->bufs[i].freelist.low_mark,
                                    dma->bufs[i].freelist.high_mark);
                                ++count;
                        }
                }
        }
        request->count = count;

        return retcode;
}

int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        struct drm_buf_desc *request = data;
        int order;

        DRM_DEBUG("%d, %d, %d\n",
                  request->size, request->low_mark, request->high_mark);

        order = drm_order(request->size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
            request->low_mark < 0 || request->high_mark < 0) {
                return EINVAL;
        }

        spin_lock(&dev->dma_lock);
        if (request->low_mark > dma->bufs[order].buf_count ||
            request->high_mark > dma->bufs[order].buf_count) {
                spin_unlock(&dev->dma_lock);
                return EINVAL;
        }

        dma->bufs[order].freelist.low_mark  = request->low_mark;
        dma->bufs[order].freelist.high_mark = request->high_mark;
        spin_unlock(&dev->dma_lock);

        return 0;
}

int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        struct drm_buf_free *request = data;
        int i;
        int idx;
        drm_buf_t *buf;
        int retcode = 0;

        DRM_DEBUG("%d\n", request->count);

        spin_lock(&dev->dma_lock);
        for (i = 0; i < request->count; i++) {
                if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
                        retcode = EFAULT;
                        break;
                }
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                            idx, dma->buf_count - 1);
                        retcode = EINVAL;
                        break;
                }
                buf = dma->buflist[idx];
                if (buf->file_priv != file_priv) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                            DRM_CURRENTPID);
                        retcode = EINVAL;
                        break;
                }
                drm_free_buffer(dev, buf);
        }
        spin_unlock(&dev->dma_lock);

        return retcode;
}

int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        vm_offset_t address;
        struct vmspace *vms;
        vm_ooffset_t foff;
        vm_size_t size;
        vm_offset_t vaddr;
        struct drm_buf_map *request = data;
        int i;

        vms = DRM_CURPROC->td_proc->p_vmspace;

        spin_lock(&dev->dma_lock);
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock(&dev->dma_lock);

        if (request->count < dma->buf_count)
                goto done;

        if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
            (drm_core_check_feature(dev, DRIVER_SG) &&
            (dma->flags & _DRM_DMA_USE_SG))) {
                drm_local_map_t *map = dev->agp_buffer_map;

                if (map == NULL) {
                        retcode = EINVAL;
                        goto done;
                }
                size = round_page(map->size);
                foff = (unsigned long)map->handle;
        } else {
                size = round_page(dma->byte_count);
                foff = 0;
        }

        vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
            SLIST_FIRST(&dev->devnode->si_hlist), foff);
        if (retcode)
                goto done;

        request->virtual = (void *)vaddr;

        for (i = 0; i < dma->buf_count; i++) {
                if (DRM_COPY_TO_USER(&request->list[i].idx,
                    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request->list[i].total,
                    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
                    sizeof(zero))) {
                        retcode = EFAULT;
                        goto done;
                }
                address = vaddr + dma->buflist[i]->offset; /* *** */
                if (DRM_COPY_TO_USER(&request->list[i].address, &address,
                    sizeof(address))) {
                        retcode = EFAULT;
                        goto done;
                }
        }

 done:
        request->count = dma->buf_count;

        DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

        return retcode;
}

/*
 * Compute the order (base-2 logarithm of size, rounded up).  Can be made
 * faster.
 */
int drm_order(unsigned long size)
{
        int order;

        if (size == 0)
                return 0;

        order = flsl(size) - 1;
        if (size & ~(1ul << order))
                ++order;

        return order;
}
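
/*
 * A few sample values (editor's illustration only): drm_order(1) == 0,
 * drm_order(4096) == 12, drm_order(4097) == 13, and drm_order(0) == 0 by
 * the explicit check above.
 */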