/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 * $FreeBSD: src/sys/dev/drm/drm_bufs.h,v 1.5.2.1 2003/04/26 07:05:28 anholt Exp $
 * $DragonFly: src/sys/dev/drm/Attic/drm_bufs.h,v 1.2 2003/06/17 04:28:24 dillon Exp $
 */

#include "dev/drm/drmP.h"

#ifndef __HAVE_PCI_DMA
#define __HAVE_PCI_DMA          0
#endif

#ifndef __HAVE_SG
#define __HAVE_SG               0
#endif

#ifndef DRIVER_BUF_PRIV_T
#define DRIVER_BUF_PRIV_T               u32
#endif
#ifndef DRIVER_AGP_BUFFERS_MAP
#if __HAVE_AGP && __HAVE_DMA
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
#else
#define DRIVER_AGP_BUFFERS_MAP( dev )   NULL
#endif
#endif
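
/*
 * Note: this file is a "template" in the old DRM sense.  Each driver
 * includes it after defining feature macros such as __HAVE_PCI_DMA,
 * __HAVE_SG and DRIVER_BUF_PRIV_T, and the per-driver DRM() macro
 * mangles every function name below into a driver-specific symbol.
 * The defaults above simply disable the optional paths for drivers
 * that do not provide their own settings.
 */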

/*
 * Compute the order (ceil(log2(size))) of a buffer size.  Linear scan;
 * could be made faster with a bit-scan instruction.
 */
int DRM(order)( unsigned long size )
{
        int order;
        unsigned long tmp;

        for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

        if ( size & ~(1 << order) )
                ++order;

        return order;
}
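
/*
 * Informal worked example: DRM(order)(4096) returns 12 (4096 == 1<<12,
 * so the round-up test adds nothing), while DRM(order)(4097) returns 13
 * because the leftover low bit forces the round-up.
 */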

int DRM(addmap)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_map_t request;
        drm_local_map_t *map;
        drm_map_list_entry_t *list;

        if (!(dev->flags & (FREAD|FWRITE)))
                return DRM_ERR(EACCES); /* Require read/write */

        DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(drm_map_t) );

        map = (drm_local_map_t *) DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return DRM_ERR(ENOMEM);

        map->offset = request.offset;
        map->size = request.size;
        map->type = request.type;
        map->flags = request.flags;
        map->mtrr   = -1;
        map->handle = 0;

        /* Only allow shared memory maps to be removable, since we only keep
         * enough bookkeeping information about shared memory to allow for
         * removal when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return DRM_ERR(EINVAL);
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & PAGE_MASK) || (map->size & PAGE_MASK) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return DRM_ERR(EINVAL);
        }

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
                if ( map->offset + map->size < map->offset ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return DRM_ERR(EINVAL);
                }
#if __REALLY_HAVE_MTRR
                if ( map->type == _DRM_FRAME_BUFFER ||
                     (map->flags & _DRM_WRITE_COMBINING) ) {
#ifdef __FreeBSD__
                        int retcode = 0, act;
                        struct mem_range_desc mrdesc;
                        mrdesc.mr_base = map->offset;
                        mrdesc.mr_len = map->size;
                        mrdesc.mr_flags = MDF_WRITECOMBINE;
                        act = MEMRANGE_SET_UPDATE;
                        bcopy(DRIVER_NAME, &mrdesc.mr_owner, strlen(DRIVER_NAME));
                        retcode = mem_range_attr_set(&mrdesc, &act);
                        map->mtrr=1;
#elif defined __NetBSD__
                        struct mtrr mtrrmap;
                        int one = 1;
                        mtrrmap.base = map->offset;
                        mtrrmap.len = map->size;
                        mtrrmap.type = MTRR_TYPE_WC;
                        mtrrmap.flags = MTRR_VALID;
                        map->mtrr = mtrr_set( &mtrrmap, &one, p, MTRR_GETSET_KERNEL );
#endif
                }
#endif /* __REALLY_HAVE_MTRR */
                DRM_IOREMAP(map);
                break;

        case _DRM_SHM:
                map->handle = (void *)DRM(alloc)(map->size, DRM_MEM_SAREA);
                DRM_DEBUG( "%ld %d %p\n",
                           map->size, DRM(order)( map->size ), map->handle );
                if ( !map->handle ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return DRM_ERR(ENOMEM);
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
#if __REALLY_HAVE_AGP
        case _DRM_AGP:
                map->offset += dev->agp->base;
                map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                break;
#endif
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                        return DRM_ERR(EINVAL);
                }
                map->offset = map->offset + dev->sg->handle;
                break;

        default:
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return DRM_ERR(EINVAL);
        }

        list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
        if(!list) {
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                return DRM_ERR(EINVAL);
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        DRM_LOCK;
        TAILQ_INSERT_TAIL(dev->maplist, list, link);
        DRM_UNLOCK;

        request.offset = map->offset;
        request.size = map->size;
        request.type = map->type;
        request.flags = map->flags;
        request.mtrr   = map->mtrr;
        request.handle = map->handle;

        if ( request.type != _DRM_SHM ) {
                request.handle = (void *)request.offset;
        }

        DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, request, sizeof(drm_map_t) );

        return 0;
}
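
/*
 * A minimal userspace sketch of the ioctl above (illustrative only:
 * DRM_IOCTL_ADD_MAP is the standard request number from drm.h, and the
 * offset/size values are hypothetical):
 *
 *      drm_map_t map;
 *      memset(&map, 0, sizeof(map));
 *      map.offset = 0xe0000000;        (page-aligned physical base)
 *      map.size   = 0x1000;            (page-aligned length)
 *      map.type   = _DRM_REGISTERS;
 *      if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
 *              ...     (map.handle and map.offset now identify the mapping)
 */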

/* Remove a map from the list and deallocate its resources if the mapping
 * is not in use.  (found_maps below is never set in this version, so the
 * cleanup always runs once the entry has been unlinked.)
 */

int DRM(rmmap)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_map_list_entry_t *list;
        drm_local_map_t *map;
        drm_map_t request;
        int found_maps = 0;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );

        DRM_LOCK;
        TAILQ_FOREACH(list, dev->maplist, link) {
                map = list->map;
                if(map->handle == request.handle &&
                   map->flags & _DRM_REMOVABLE) break;
        }

        /* If we reached the end of the list without breaking out, no
         * matching removable map was found.
         */
        if(list == NULL) {
                DRM_UNLOCK;
                return DRM_ERR(EINVAL);
        }
        TAILQ_REMOVE(dev->maplist, list, link);
        DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);


        if(!found_maps) {
                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
                        if (map->mtrr >= 0) {
                                int retcode;
#ifdef __FreeBSD__
                                int act;
                                struct mem_range_desc mrdesc;
                                mrdesc.mr_base = map->offset;
                                mrdesc.mr_len = map->size;
                                mrdesc.mr_flags = MDF_WRITECOMBINE;
                                act = MEMRANGE_SET_REMOVE;
                                bcopy(DRIVER_NAME, &mrdesc.mr_owner, strlen(DRIVER_NAME));
                                retcode = mem_range_attr_set(&mrdesc, &act);
#elif defined __NetBSD__
                                struct mtrr mtrrmap;
                                int one = 1;
                                mtrrmap.base = map->offset;
                                mtrrmap.len = map->size;
                                mtrrmap.type = 0;
                                mtrrmap.flags = 0;
                                mtrrmap.owner = p->p_pid;
                                retcode = mtrr_set( &mtrrmap, &one, p, MTRR_GETSET_KERNEL);
                                DRM_DEBUG("mtrr_del = %d\n", retcode);
#endif
                        }
#endif
                        DRM(ioremapfree)( map );
                        break;
                case _DRM_SHM:
                        DRM(free)( map->handle, map->size, DRM_MEM_SAREA );
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                }
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
        }
        DRM_UNLOCK;
        return 0;
}

#if __HAVE_DMA


static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        DRM(free)((void *)entry->seglist[i],
                                        entry->buf_size,
                                        DRM_MEM_DMA);
                }
                DRM(free)(entry->seglist,
                          entry->seg_count *
                          sizeof(*entry->seglist),
                          DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if(entry->buf_count) {
                for(i = 0; i < entry->buf_count; i++) {
                        if(entry->buflist[i].dev_private) {
                                DRM(free)(entry->buflist[i].dev_private,
                                          entry->buflist[i].dev_priv_size,
                                          DRM_MEM_BUFS);
                        }
                }
                DRM(free)(entry->buflist,
                          entry->buf_count *
                          sizeof(*entry->buflist),
                          DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}
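
/*
 * Note on the helper above: on a partial allocation failure the
 * addbufs_*() paths first set entry->buf_count to the full request
 * count before calling it, so that the whole buflist array -- and any
 * dev_private blocks that were successfully allocated -- is returned
 * to the allocator.
 */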

#if __REALLY_HAVE_AGP
static int DRM(addbufs_agp)(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = DRM(order)(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
                return DRM_ERR(EINVAL);

        DRM_LOCK;
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                DRM_UNLOCK;
                return DRM_ERR(ENOMEM); /* May only call once for each order */
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                DRM_UNLOCK;
                return DRM_ERR(ENOMEM);
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->pending = 0;
                buf->filp    = NULL;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        DRM_UNLOCK;
                        return DRM_ERR(ENOMEM);
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                DRM_UNLOCK;
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        DRM_UNLOCK;

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        return 0;
}
#endif /* __REALLY_HAVE_AGP */
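
/*
 * Sizing arithmetic, by example (assuming 4 KB pages, PAGE_SHIFT == 12):
 * a request for size 65536 gives order 16 and size 1<<16.  With
 * _DRM_PAGE_ALIGN the per-buffer stride ("alignment") is
 * round_page(65536) == 65536, page_order is 16 - 12 == 4, and total is
 * PAGE_SIZE << 4 == 65536, so byte_count advances by 64 KB per buffer.
 * The same arithmetic is shared by the PCI and SG paths below.
 */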

#if __HAVE_PCI_DMA
static int DRM(addbufs_pci)(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = DRM(order)(request->size);
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
                   request->count, request->size, size, order );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
                return DRM_ERR(EINVAL);

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        DRM_LOCK;
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                DRM_UNLOCK;
                return DRM_ERR(ENOMEM); /* May only call once for each order */
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                DRM_UNLOCK;
                return DRM_ERR(ENOMEM);
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                DRM(free)( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                DRM_UNLOCK;
                return DRM_ERR(ENOMEM);
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        temp_pagelist = DRM(realloc)( dma->pagelist,
                                      dma->page_count * sizeof(*dma->pagelist),
                                      (dma->page_count + (count << page_order))
                                      * sizeof(*dma->pagelist),
                                      DRM_MEM_PAGES );
        if(!temp_pagelist) {
                DRM(free)( entry->buflist,
                           count * sizeof(*entry->buflist),
                           DRM_MEM_BUFS );
                DRM(free)( entry->seglist,
                           count * sizeof(*entry->seglist),
                           DRM_MEM_SEGS );
                DRM_UNLOCK;
                return DRM_ERR(ENOMEM);
        }

        dma->pagelist = temp_pagelist;
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = (unsigned long)DRM(alloc)( size, DRM_MEM_DMA );
                if ( !page ) break;
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        dma->pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->pending = 0;
                        buf->filp    = NULL;
                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                DRM_UNLOCK;
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        DRM_UNLOCK;

        request->count = entry->buf_count;
        request->size = size;

        return 0;

}
#endif /* __HAVE_PCI_DMA */

#if __REALLY_HAVE_SG
static int DRM(addbufs_sg)(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = DRM(order)(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
                return DRM_ERR(EINVAL);

        DRM_LOCK;
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                DRM_UNLOCK;
                return DRM_ERR(ENOMEM); /* May only call once for each order */
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                     DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                DRM_UNLOCK;
                return DRM_ERR(ENOMEM);
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->pending = 0;
                buf->filp    = NULL;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        DRM_UNLOCK;
                        return DRM_ERR(ENOMEM);
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                DRM_UNLOCK;
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        DRM_UNLOCK;

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        return 0;
}
#endif /* __REALLY_HAVE_SG */
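
/*
 * DRM(addbufs) below dispatches on request.flags: an AGP request takes
 * precedence when the driver is built with AGP support, then a
 * scatter/gather request, with plain PCI DMA as the fallback; if none
 * of the paths is compiled in, the ioctl fails with EINVAL.
 */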

int DRM(addbufs)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_buf_desc_t request;
        int err;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        if (dev->dma == NULL)
                return DRM_ERR(EINVAL);

        if (request.count < 0 || request.count > 4096)
                return DRM_ERR(EINVAL);

        DRM_SPINLOCK(&dev->count_lock);
        if (dev->buf_use) {
                DRM_SPINUNLOCK(&dev->count_lock);
                return DRM_ERR(EBUSY);
        }
        /* dev->buf_alloc acts as a lock to prevent infobufs/mapbufs from
         * reading dma->bufs while buffers are being allocated. */
        dev->buf_alloc++;
        DRM_SPINUNLOCK(&dev->count_lock);


#if __REALLY_HAVE_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                err = DRM(addbufs_agp)(dev, &request);
        else
#endif
#if __REALLY_HAVE_SG
        if ( request.flags & _DRM_SG_BUFFER )
                err = DRM(addbufs_sg)(dev, &request);
        else
#endif
#if __HAVE_PCI_DMA
                err = DRM(addbufs_pci)(dev, &request);
#else
                err = DRM_ERR(EINVAL);
#endif

        DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request, sizeof(request));

        DRM_SPINLOCK(&dev->count_lock);
        dev->buf_alloc--;
        DRM_SPINUNLOCK(&dev->count_lock);

        return err;
}
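
/*
 * Illustrative client-side use of the ioctl above (the request number
 * DRM_IOCTL_ADD_BUFS comes from drm.h; the sizes are hypothetical):
 *
 *      drm_buf_desc_t desc;
 *      memset(&desc, 0, sizeof(desc));
 *      desc.count = 32;
 *      desc.size  = 65536;
 *      desc.flags = _DRM_AGP_BUFFER;   (AGP-backed buffers)
 *      desc.agp_start = 0;             (offset into the AGP aperture)
 *      ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *      (on return, desc.count and desc.size report what was allocated)
 */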

int DRM(infobufs)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        int i;
        int count;

        if ( !dma ) return DRM_ERR(EINVAL);

        DRM_SPINLOCK( &dev->count_lock );
        if (dev->buf_alloc != 0) {
                DRM_SPINUNLOCK( &dev->count_lock );
                return DRM_ERR(EBUSY);
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK( &dev->count_lock );

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t from;

                                from.count = dma->bufs[i].buf_count;
                                from.size = dma->bufs[i].buf_size;
                                from.low_mark = dma->bufs[i].freelist.low_mark;
                                from.high_mark = dma->bufs[i].freelist.high_mark;

                                if (DRM_COPY_TO_USER(&request.list[count], &from,
                                    sizeof(drm_buf_desc_t)) != 0)
                                        return DRM_ERR(EFAULT);

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );

        return 0;
}
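
/*
 * Note: infobufs follows the usual probe-then-fetch pattern.  A caller
 * that passes request.count smaller than the number of populated orders
 * gets no list entries, only the updated count, and can retry with a
 * large enough list array.
 */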

int DRM(markbufs)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if ( !dma ) return DRM_ERR(EINVAL);

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = DRM(order)( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
                return DRM_ERR(EINVAL);
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return DRM_ERR(EINVAL);
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return DRM_ERR(EINVAL);

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}
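
/*
 * The watermarks stored above are intended for the DMA freelist logic
 * (see drm_dma.h), which uses them to judge when the free-buffer pool
 * for an order is running low or has recovered; markbufs itself only
 * range-checks the values against buf_count and records them.
 */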

int DRM(freebufs)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if ( !dma ) return DRM_ERR(EINVAL);

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( DRM_COPY_FROM_USER( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return DRM_ERR(EFAULT);
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return DRM_ERR(EINVAL);
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                                   DRM_CURRENTPID);
                        return DRM_ERR(EINVAL);
                }
                DRM(free_buffer)( dev, buf );
        }

        return 0;
}

int DRM(mapbufs)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        vm_offset_t virtual, address;
#ifdef __FreeBSD__
#if __FreeBSD_version >= 500000
        struct vmspace *vms = p->td_proc->p_vmspace;
#else
        struct vmspace *vms = p->p_vmspace;
#endif
#endif /* __FreeBSD__ */
#ifdef __NetBSD__
        struct vnode *vn;
        struct vmspace *vms = p->p_vmspace;
#endif /* __NetBSD__ */

        drm_buf_map_t request;
        int i;

        if ( !dma ) return DRM_ERR(EINVAL);

        DRM_SPINLOCK( &dev->count_lock );
        if (dev->buf_alloc != 0) {
                DRM_SPINUNLOCK( &dev->count_lock );
                return DRM_ERR(EBUSY);
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK( &dev->count_lock );

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );

#ifdef __NetBSD__
        if(!vfinddev(kdev, VCHR, &vn))
                return 0;       /* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ */

        if ( request.count >= dma->buf_count ) {
                if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
                     (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
                        drm_local_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );

                        if ( !map ) {
                                retcode = EINVAL;
                                goto done;
                        }

#ifdef __FreeBSD__
                        virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
                        retcode = vm_mmap(&vms->vm_map,
                                          &virtual,
                                          round_page(map->size),
                                          PROT_READ|PROT_WRITE, VM_PROT_ALL,
                                          MAP_SHARED,
                                          SLIST_FIRST(&kdev->si_hlist),
                                          (unsigned long)map->offset );
#elif defined(__NetBSD__)
                        virtual = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
                        retcode = uvm_mmap(&vms->vm_map,
                                           (vaddr_t *)&virtual,
                                           round_page(map->size),
                                           UVM_PROT_READ | UVM_PROT_WRITE,
                                           UVM_PROT_ALL, MAP_SHARED,
                                           &vn->v_uobj, map->offset,
                                           p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ */
                } else {
#ifdef __FreeBSD__
                        virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
                        retcode = vm_mmap(&vms->vm_map,
                                          &virtual,
                                          round_page(dma->byte_count),
                                          PROT_READ|PROT_WRITE, VM_PROT_ALL,
                                          MAP_SHARED,
                                          SLIST_FIRST(&kdev->si_hlist),
                                          0);
#elif defined(__NetBSD__)
                        virtual = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
                        retcode = uvm_mmap(&vms->vm_map,
                                           (vaddr_t *)&virtual,
                                           round_page(dma->byte_count),
                                           UVM_PROT_READ | UVM_PROT_WRITE,
                                           UVM_PROT_ALL, MAP_SHARED,
                                           &vn->v_uobj, 0,
                                           p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ */
                }
                if (retcode)
                        goto done;
                request.virtual = (void *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( DRM_COPY_TO_USER( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = EFAULT;
                                goto done;
                        }
                        if ( DRM_COPY_TO_USER( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = EFAULT;
                                goto done;
                        }
                        if ( DRM_COPY_TO_USER( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( DRM_COPY_TO_USER( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;

        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        DRM_COPY_TO_USER_IOCTL( (drm_buf_map_t *)data, request, sizeof(request) );

        return DRM_ERR(retcode);
}
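
/*
 * Sketch of the client side of this ioctl (illustrative; the
 * DRM_IOCTL_MAP_BUFS request number and drm_buf_pub_t come from drm.h,
 * and the list size is hypothetical):
 *
 *      drm_buf_map_t bmap;
 *      drm_buf_pub_t list[256];
 *      memset(&bmap, 0, sizeof(bmap));
 *      bmap.count = 256;               (must be >= the kernel's count)
 *      bmap.list  = list;
 *      if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bmap) == 0)
 *              ...     (buffer i starts at list[i].address)
 */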

#endif /* __HAVE_DMA */