/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $FreeBSD: src/sys/dev/drm/drm_dma.h,v 1.5.2.1 2003/04/26 07:05:28 anholt Exp $
 * $DragonFly: src/sys/dev/drm/Attic/drm_dma.h,v 1.7 2005/10/12 17:35:50 dillon Exp $
 */

#include "dev/drm/drmP.h"

#ifndef __HAVE_DMA_WAITQUEUE
#define __HAVE_DMA_WAITQUEUE    0
#endif
#ifndef __HAVE_DMA_RECLAIM
#define __HAVE_DMA_RECLAIM      0
#endif
#ifndef __HAVE_SHARED_IRQ
#define __HAVE_SHARED_IRQ       0
#endif

#if __HAVE_DMA

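/*
 * Allocate and zero the per-device DMA bookkeeping structure (dev->dma),
 * including one bucket per buffer order up to DRM_MAX_ORDER.
 * Returns ENOMEM if the allocation fails.
 */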
int DRM(dma_setup)( drm_device_t *dev )
{
        int i;

        dev->dma = DRM(alloc)( sizeof(*dev->dma), DRM_MEM_DRIVER );
        if ( !dev->dma )
                return DRM_ERR(ENOMEM);

        memset( dev->dma, 0, sizeof(*dev->dma) );

        for ( i = 0 ; i <= DRM_MAX_ORDER ; i++ )
                memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));

        return 0;
}

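/*
 * Release everything tracked by dev->dma: for each order, the DMA segments
 * and their seglist, each buffer's driver-private data and the buflist;
 * then the device-wide buflist and pagelist, and finally dev->dma itself.
 */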
void DRM(dma_takedown)(drm_device_t *dev)
{
        drm_device_dma_t  *dma = dev->dma;
        int               i, j;

        if (!dma) return;

                                /* Clear dma buffers */
        for (i = 0; i <= DRM_MAX_ORDER; i++) {
                if (dma->bufs[i].seg_count) {
                        DRM_DEBUG("order %d: buf_count = %d,"
                                  " seg_count = %d\n",
                                  i,
                                  dma->bufs[i].buf_count,
                                  dma->bufs[i].seg_count);
                        for (j = 0; j < dma->bufs[i].seg_count; j++) {
                                DRM(free)((void *)dma->bufs[i].seglist[j],
                                                dma->bufs[i].buf_size,
                                                DRM_MEM_DMA);
                        }
                        DRM(free)(dma->bufs[i].seglist,
                                  dma->bufs[i].seg_count
                                  * sizeof(*dma->bufs[0].seglist),
                                  DRM_MEM_SEGS);
                }
                if(dma->bufs[i].buf_count) {
                        for(j = 0; j < dma->bufs[i].buf_count; j++) {
                           if(dma->bufs[i].buflist[j].dev_private) {
                              DRM(free)(dma->bufs[i].buflist[j].dev_private,
                                        dma->bufs[i].buflist[j].dev_priv_size,
                                        DRM_MEM_BUFS);
                           }
                        }
                        DRM(free)(dma->bufs[i].buflist,
                                  dma->bufs[i].buf_count *
                                  sizeof(*dma->bufs[0].buflist),
                                  DRM_MEM_BUFS);
                }
        }

        if (dma->buflist) {
                DRM(free)(dma->buflist,
                          dma->buf_count * sizeof(*dma->buflist),
                          DRM_MEM_BUFS);
        }

        if (dma->pagelist) {
                DRM(free)(dma->pagelist,
                          dma->page_count * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES);
        }
        DRM(free)(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
        dev->dma = NULL;
}


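/*
 * Mark a buffer as idle and unowned.  Only the bookkeeping fields are
 * cleared; no memory is released here.
 */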
void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
{
        if (!buf) return;

        buf->pending  = 0;
        buf->filp     = NULL;
        buf->used     = 0;
}

#if !__HAVE_DMA_RECLAIM
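/*
 * Generic buffer reclaim, compiled only when the driver does not supply its
 * own __HAVE_DMA_RECLAIM hook.  Buffers owned by the closing file are freed
 * if idle, flagged DRM_LIST_RECLAIM if still waiting, and left untouched if
 * already on the hardware.
 */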
void DRM(reclaim_buffers)(drm_device_t *dev, DRMFILE filp)
{
        drm_device_dma_t *dma = dev->dma;
        int              i;

        if (!dma) return;
        for (i = 0; i < dma->buf_count; i++) {
                if (dma->buflist[i]->filp == filp) {
                        switch (dma->buflist[i]->list) {
                        case DRM_LIST_NONE:
                                DRM(free_buffer)(dev, dma->buflist[i]);
                                break;
                        case DRM_LIST_WAIT:
                                dma->buflist[i]->list = DRM_LIST_RECLAIM;
                                break;
                        default:
                                /* Buffer already on hardware. */
                                break;
                        }
                }
        }
}
#endif


#if __HAVE_DMA_IRQ

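/*
 * Claim the interrupt line for the device: record dev->irq under the lock
 * (EBUSY if a handler is already installed), run the driver's pre-install
 * hook, allocate and hook up the interrupt resource for the host OS, then
 * run the post-install hook.  On failure the irq fields are rolled back.
 */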
int DRM(irq_install)( drm_device_t *dev, int irq )
{
        int retcode;

        if ( !irq )
                return DRM_ERR(EINVAL);

        DRM_LOCK;
        if ( dev->irq ) {
                DRM_UNLOCK;
                return DRM_ERR(EBUSY);
        }
        dev->irq = irq;
        DRM_UNLOCK;

        DRM_DEBUG( "%s: irq=%d\n", __func__, irq );

        dev->context_flag = 0;

        dev->dma->next_buffer = NULL;
        dev->dma->this_buffer = NULL;

#if __HAVE_DMA_IRQ_BH
        TASK_INIT(&dev->task, 0, DRM(dma_immediate_bh), dev);
#endif

#if __HAVE_VBL_IRQ && 0 /* disabled */
        DRM_SPININIT( dev->vbl_lock, "vblsig" );
        TAILQ_INIT( &dev->vbl_sig_list );
#endif

                                /* Before installing handler */
        DRM(driver_irq_preinstall)( dev );

                                /* Install handler */
        dev->irqrid = 0;
#if defined(__DragonFly__) || defined(__FreeBSD__)
        dev->irqr = bus_alloc_resource(dev->device, SYS_RES_IRQ, &dev->irqrid,
                                      0, ~0, 1, RF_SHAREABLE);
        if (!dev->irqr) {
#elif defined(__NetBSD__)
        if (pci_intr_map(&dev->pa, &dev->ih) != 0) {
#endif
                DRM_LOCK;
                dev->irq = 0;
                dev->irqrid = 0;
                DRM_UNLOCK;
                return ENOENT;
        }

#if defined(__DragonFly__) || defined(__FreeBSD__)
#if defined(__DragonFly__) || __FreeBSD_version < 500000
        retcode = bus_setup_intr(dev->device, dev->irqr, 0,
                                 DRM(dma_service), dev, &dev->irqh, NULL);
#else
        retcode = bus_setup_intr(dev->device, dev->irqr, INTR_MPSAFE,
                                 DRM(dma_service), dev, &dev->irqh, NULL);
#endif
        if ( retcode ) {
#elif defined(__NetBSD__)
        dev->irqh = pci_intr_establish(&dev->pa.pa_pc, dev->ih, IPL_TTY,
                                      (int (*)(DRM_IRQ_ARGS))DRM(dma_service), dev);
        if ( !dev->irqh ) {
#endif
                DRM_LOCK;
#if defined(__DragonFly__) || defined(__FreeBSD__)
                bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid, dev->irqr);
#endif
                dev->irq = 0;
                dev->irqrid = 0;
                DRM_UNLOCK;
                return retcode;
        }

                                /* After installing handler */
        DRM(driver_irq_postinstall)( dev );

        return 0;
}

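/*
 * Detach the interrupt handler installed by DRM(irq_install): clear the irq
 * bookkeeping under the lock, run the driver's uninstall hook, and release
 * the OS-level interrupt resources.
 */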
int DRM(irq_uninstall)( drm_device_t *dev )
{
        int irq;
        int irqrid;

        DRM_LOCK;
        irq = dev->irq;
        irqrid = dev->irqrid;
        dev->irq = 0;
        dev->irqrid = 0;
        DRM_UNLOCK;

        if ( !irq )
                return DRM_ERR(EINVAL);

        DRM_DEBUG( "%s: irq=%d\n", __func__, irq );

        DRM(driver_irq_uninstall)( dev );

#if defined(__DragonFly__) || defined(__FreeBSD__)
        bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
        bus_release_resource(dev->device, SYS_RES_IRQ, irqrid, dev->irqr);
#elif defined(__NetBSD__)
        pci_intr_disestablish(&dev->pa.pa_pc, dev->irqh);
#endif

        return 0;
}

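/*
 * DRM_IOCTL_CONTROL handler: installs or uninstalls the IRQ handler on
 * behalf of userspace, depending on ctl.func.
 */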
int DRM(control)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_control_t ctl;

        DRM_COPY_FROM_USER_IOCTL( ctl, (drm_control_t *) data, sizeof(ctl) );

        switch ( ctl.func ) {
        case DRM_INST_HANDLER:
                return DRM(irq_install)( dev, ctl.irq );
        case DRM_UNINST_HANDLER:
                return DRM(irq_uninstall)( dev );
        default:
                return DRM_ERR(EINVAL);
        }
}

#if __HAVE_VBL_IRQ
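/*
 * DRM_IOCTL_WAIT_VBLANK handler.  Relative requests are converted to
 * absolute sequence numbers.  Signal-based completion is disabled here, so
 * _DRM_VBLANK_SIGNAL requests fail with EINVAL; otherwise the caller blocks
 * in DRM(vblank_wait) and gets the completion timestamp back in the reply.
 */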
int DRM(wait_vblank)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_wait_vblank_t vblwait;
        struct timeval now;
        int flags;
        int ret;

        if (!dev->irq)
                return DRM_ERR(EINVAL);

        DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
                                  sizeof(vblwait) );

        if (vblwait.request.type & _DRM_VBLANK_RELATIVE) {
                vblwait.request.sequence += atomic_read(&dev->vbl_received);
                vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
        }

        flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
        if (flags & _DRM_VBLANK_SIGNAL) {
#if 0 /* disabled */
                drm_vbl_sig_t *vbl_sig = DRM_MALLOC(sizeof(drm_vbl_sig_t));
                if (vbl_sig == NULL)
                        return ENOMEM;
                bzero(vbl_sig, sizeof(*vbl_sig));

                vbl_sig->sequence = vblwait.request.sequence;
                vbl_sig->signo = vblwait.request.signal;
                vbl_sig->pid = DRM_CURRENTPID;

                vblwait.reply.sequence = atomic_read(&dev->vbl_received);

                DRM_SPINLOCK(&dev->vbl_lock);
                TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
                DRM_SPINUNLOCK(&dev->vbl_lock);
                ret = 0;
#endif
                ret = EINVAL;
        } else {
                ret = DRM(vblank_wait)(dev, &vblwait.request.sequence);

                microtime(&now);
                vblwait.reply.tval_sec = now.tv_sec;
                vblwait.reply.tval_usec = now.tv_usec;
        }

        DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
                                sizeof(vblwait) );

        return ret;
}

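/*
 * No-op stub: vblank signal delivery is disabled, so there is never a
 * signal list to walk.  The original implementation is kept below under
 * "#if 0" for reference.
 */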
void DRM(vbl_send_signals)(drm_device_t *dev)
{
}

#if 0 /* disabled */
void DRM(vbl_send_signals)( drm_device_t *dev )
{
        drm_vbl_sig_t *vbl_sig;
        unsigned int vbl_seq = atomic_read( &dev->vbl_received );
        struct proc *p;

        DRM_SPINLOCK(&dev->vbl_lock);

loop:
        vbl_sig = TAILQ_FIRST(&dev->vbl_sig_list);
        while (vbl_sig != NULL) {
                drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);

                if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
                        p = pfind(vbl_sig->pid);
                        if (p != NULL)
                                psignal(p, vbl_sig->signo);

                        TAILQ_REMOVE(&dev->vbl_sig_list, vbl_sig, link);
                        DRM_SPINUNLOCK(&dev->vbl_lock);
                        DRM_FREE(vbl_sig,sizeof(*vbl_sig));
                        goto loop;
                }
                vbl_sig = next;
        }

        DRM_SPINUNLOCK(&dev->vbl_lock);
}
#endif

#endif /*  __HAVE_VBL_IRQ */

#else

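/*
 * Without __HAVE_DMA_IRQ there is no handler to manage, so install and
 * uninstall requests are accepted as no-ops rather than failing userspace.
 */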
int DRM(control)( DRM_IOCTL_ARGS )
{
        drm_control_t ctl;

        DRM_COPY_FROM_USER_IOCTL( ctl, (drm_control_t *) data, sizeof(ctl) );

        switch ( ctl.func ) {
        case DRM_INST_HANDLER:
        case DRM_UNINST_HANDLER:
                return 0;
        default:
                return DRM_ERR(EINVAL);
        }
}

#endif /* __HAVE_DMA_IRQ */

#endif /* __HAVE_DMA */