1 /* mach64_dma.c -- DMA support for mach64 (Rage Pro) driver -*- linux-c -*- */
2 /**
3  * \file mach64_dma.c
4  * DMA support for mach64 (Rage Pro) driver
5  *
6  * \author Gareth Hughes <gareth@valinux.com>
7  * \author Frank C. Earl <fearl@airmail.net>
8  * \author Leif Delgass <ldelgass@retinalburn.net>
9  * \author José Fonseca <j_r_fonseca@yahoo.co.uk>
10  */
11
12 /*
13  * Copyright 2000 Gareth Hughes
14  * Copyright 2002 Frank C. Earl
15  * Copyright 2002-2003 Leif Delgass
16  * All Rights Reserved.
17  *
18  * Permission is hereby granted, free of charge, to any person obtaining a
19  * copy of this software and associated documentation files (the "Software"),
20  * to deal in the Software without restriction, including without limitation
21  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
22  * and/or sell copies of the Software, and to permit persons to whom the
23  * Software is furnished to do so, subject to the following conditions:
24  *
25  * The above copyright notice and this permission notice (including the next
26  * paragraph) shall be included in all copies or substantial portions of the
27  * Software.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
32  * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
33  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
35  *
36  * $DragonFly: src/sys/dev/drm/mach64_dma.c,v 1.1 2008/04/05 18:12:29 hasso Exp $
37  */
38
39 #include "drmP.h"
40 #include "drm.h"
41 #include "mach64_drm.h"
42 #include "mach64_drv.h"
43
44 /*******************************************************************/
45 /** \name Engine, FIFO control */
46 /*@{*/
47
48 /**
49  * Waits for free entries in the FIFO.
50  *
51  * \note Most writes to Mach64 registers are automatically routed through
52  * the command FIFO, which is 16 entries deep. Prior to writing to any draw
53  * engine register, one has to ensure that enough FIFO entries are available
54  * by calling this function.  Failure to do so may cause the engine to lock up.
55  *
56  * \param dev_priv pointer to device private data structure.
57  * \param entries number of free entries in the FIFO to wait for.
58  *
59  * \returns zero on success, or -EBUSY if the timeout (specified by
60  * drm_mach64_private::usec_timeout) occurs.
61  */
62 int mach64_do_wait_for_fifo(drm_mach64_private_t *dev_priv, int entries)
63 {
64         int slots = 0, i;
65
66         for (i = 0; i < dev_priv->usec_timeout; i++) {
67                 slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK);
68                 if (slots <= (0x8000 >> entries))
69                         return 0;
70                 DRM_UDELAY(1);
71         }
72
73         DRM_INFO("failed! slots=%d entries=%d\n", slots, entries);
74         return -EBUSY;
75 }
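/*
 * Usage sketch (illustrative only, not called by the driver): a caller that
 * wants to program two draw-engine registers through MMIO would first
 * reserve two FIFO slots, e.g.
 *
 *      if (mach64_do_wait_for_fifo(dev_priv, 2) == 0) {
 *              MACH64_WRITE(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch);
 *              MACH64_WRITE(MACH64_Z_OFF_PITCH, dev_priv->depth_offset_pitch);
 *      }
 *
 * The registers chosen here are arbitrary; the wait-then-write pattern is the
 * same one used by mach64_bm_dma_test() later in this file.
 */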
76
77 /**
78  * Wait for the draw engine to be idle.
79  */
80 int mach64_do_wait_for_idle(drm_mach64_private_t *dev_priv)
81 {
82         int i, ret;
83
84         ret = mach64_do_wait_for_fifo(dev_priv, 16);
85         if (ret < 0)
86                 return ret;
87
88         for (i = 0; i < dev_priv->usec_timeout; i++) {
89                 if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE))
90                         return 0;
91                 DRM_UDELAY(1);
92         }
93
94         DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
95         mach64_dump_ring_info(dev_priv);
96         return -EBUSY;
97 }
98
99 /**
100  * Wait for free entries in the ring buffer.
101  *
102  * The Mach64 bus master can be configured to act as a virtual FIFO, using a
103  * circular buffer (commonly referred to as a "ring buffer" in other drivers) with
104  * pointers to engine commands. This allows the CPU to do other things while
105  * the graphics engine is busy, i.e., DMA mode.
106  *
107  * This function should be called before writing new entries to the ring
108  * buffer.
109  *
110  * \param dev_priv pointer to device private data structure.
111  * \param n number of free entries in the ring buffer to wait for.
112  *
113  * \returns zero on success, or -EBUSY if the timeout (specified by
114  * drm_mach64_private_t::usec_timeout) occurs.
115  *
116  * \sa mach64_dump_ring_info()
117  */
118 int mach64_wait_ring(drm_mach64_private_t *dev_priv, int n)
119 {
120         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
121         int i;
122
123         for (i = 0; i < dev_priv->usec_timeout; i++) {
124                 mach64_update_ring_snapshot(dev_priv);
125                 if (ring->space >= n) {
126                         if (i > 0)
127                                 DRM_DEBUG("%d usecs\n", i);
128                         return 0;
129                 }
130                 DRM_UDELAY(1);
131         }
132
133         /* FIXME: This is being ignored... */
134         DRM_ERROR("failed!\n");
135         mach64_dump_ring_info(dev_priv);
136         return -EBUSY;
137 }
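/*
 * Usage sketch (illustrative only): reserving space for a single 4-dword
 * descriptor before writing it by hand would look like
 *
 *      if (mach64_wait_ring(dev_priv, 4 * sizeof(u32)) < 0) {
 *              mach64_dump_engine_info(dev_priv);
 *              mach64_do_engine_reset(dev_priv);
 *              return -EBUSY;
 *      }
 *
 * which is essentially what the BEGIN_RING() macro below does when the
 * cached ring->space runs out.
 */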
138
139 /**
140  * Wait until all DMA requests have been processed...
141  *
142  * \sa mach64_wait_ring()
143  */
144 static int mach64_ring_idle(drm_mach64_private_t *dev_priv)
145 {
146         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
147         u32 head;
148         int i;
149
150         head = ring->head;
151         i = 0;
152         while (i < dev_priv->usec_timeout) {
153                 mach64_update_ring_snapshot(dev_priv);
154                 if (ring->head == ring->tail &&
155                     !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {
156                         if (i > 0)
157                                 DRM_DEBUG("%d usecs\n", i);
158                         return 0;
159                 }
160                 if (ring->head == head) {
161                         ++i;
162                 } else {
163                         head = ring->head;
164                         i = 0;
165                 }
166                 DRM_UDELAY(1);
167         }
168
169         DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
170         mach64_dump_ring_info(dev_priv);
171         return -EBUSY;
172 }
173
174 /**
175  * Reset the ring buffer descriptors.
176  *
177  * \sa mach64_do_engine_reset()
178  */
179 static void mach64_ring_reset(drm_mach64_private_t *dev_priv)
180 {
181         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
182
183         mach64_do_release_used_buffers(dev_priv);
184         ring->head_addr = ring->start_addr;
185         ring->head = ring->tail = 0;
186         ring->space = ring->size;
187
188         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
189                      ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
190
191         dev_priv->ring_running = 0;
192 }
193
194 /**
195  * Ensure that all the queued commands will be processed.
196  */
197 int mach64_do_dma_flush(drm_mach64_private_t *dev_priv)
198 {
199         /* FIXME: It's not necessary to wait for idle when flushing;
200          * we just need to ensure the ring will be completely processed
201          * in finite time without another ioctl.
202          */
203         return mach64_ring_idle(dev_priv);
204 }
205
206 /**
207  * Stop all DMA activity.
208  */
209 int mach64_do_dma_idle(drm_mach64_private_t *dev_priv)
210 {
211         int ret;
212
213         /* wait for completion */
214         if ((ret = mach64_ring_idle(dev_priv)) < 0) {
215                 DRM_ERROR("failed BM_GUI_TABLE=0x%08x tail: %u\n",
216                           MACH64_READ(MACH64_BM_GUI_TABLE),
217                           dev_priv->ring.tail);
218                 return ret;
219         }
220
221         mach64_ring_stop(dev_priv);
222
223         /* clean up after pass */
224         mach64_do_release_used_buffers(dev_priv);
225         return 0;
226 }
227
228 /**
229  * Reset the engine.  This will stop the DMA if it is running.
230  */
231 int mach64_do_engine_reset(drm_mach64_private_t *dev_priv)
232 {
233         u32 tmp;
234
235         DRM_DEBUG("\n");
236
237         /* Kill off any outstanding DMA transfers.
238          */
239         tmp = MACH64_READ(MACH64_BUS_CNTL);
240         MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS);
241
242         /* Reset the GUI engine (high to low transition).
243          */
244         tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
245         MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE);
246         /* Enable the GUI engine
247          */
248         tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
249         MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE);
250
251         /* ensure engine is not locked up by clearing any FIFO or HOST errors
252          */
253         tmp = MACH64_READ(MACH64_BUS_CNTL);
254         MACH64_WRITE(MACH64_BUS_CNTL, tmp | 0x00a00000);
255
256         /* Once GUI engine is restored, disable bus mastering */
257         MACH64_WRITE(MACH64_SRC_CNTL, 0);
258
259         /* Reset descriptor ring */
260         mach64_ring_reset(dev_priv);
261
262         return 0;
263 }
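/*
 * Recovery sketch (illustrative only): callers in this file that detect a
 * lockup typically dump state first and then fall back to this reset, e.g.
 *
 *      if (mach64_do_wait_for_idle(dev_priv) < 0) {
 *              mach64_dump_engine_info(dev_priv);
 *              mach64_do_engine_reset(dev_priv);
 *      }
 *
 * as done by mach64_bm_dma_test() and the BEGIN_RING() macro below.
 */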
264
265 /*@}*/
266
267
268 /*******************************************************************/
269 /** \name Debugging output */
270 /*@{*/
271
272 /**
273  * Dump engine register values.
274  */
275 void mach64_dump_engine_info(drm_mach64_private_t *dev_priv)
276 {
277         DRM_INFO("\n");
278         if (!dev_priv->is_pci) {
279                 DRM_INFO("           AGP_BASE = 0x%08x\n",
280                          MACH64_READ(MACH64_AGP_BASE));
281                 DRM_INFO("           AGP_CNTL = 0x%08x\n",
282                          MACH64_READ(MACH64_AGP_CNTL));
283         }
284         DRM_INFO("     ALPHA_TST_CNTL = 0x%08x\n",
285                  MACH64_READ(MACH64_ALPHA_TST_CNTL));
286         DRM_INFO("\n");
287         DRM_INFO("         BM_COMMAND = 0x%08x\n",
288                  MACH64_READ(MACH64_BM_COMMAND));
289         DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
290                  MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
291         DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
292                  MACH64_READ(MACH64_BM_GUI_TABLE));
293         DRM_INFO("          BM_STATUS = 0x%08x\n",
294                  MACH64_READ(MACH64_BM_STATUS));
295         DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
296                  MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
297         DRM_INFO("    BM_SYSTEM_TABLE = 0x%08x\n",
298                  MACH64_READ(MACH64_BM_SYSTEM_TABLE));
299         DRM_INFO("           BUS_CNTL = 0x%08x\n",
300                  MACH64_READ(MACH64_BUS_CNTL));
301         DRM_INFO("\n");
302         /* DRM_INFO( "         CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */
303         DRM_INFO("        CLR_CMP_CLR = 0x%08x\n",
304                  MACH64_READ(MACH64_CLR_CMP_CLR));
305         DRM_INFO("       CLR_CMP_CNTL = 0x%08x\n",
306                  MACH64_READ(MACH64_CLR_CMP_CNTL));
307         /* DRM_INFO( "        CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */
308         DRM_INFO("     CONFIG_CHIP_ID = 0x%08x\n",
309                  MACH64_READ(MACH64_CONFIG_CHIP_ID));
310         DRM_INFO("        CONFIG_CNTL = 0x%08x\n",
311                  MACH64_READ(MACH64_CONFIG_CNTL));
312         DRM_INFO("       CONFIG_STAT0 = 0x%08x\n",
313                  MACH64_READ(MACH64_CONFIG_STAT0));
314         DRM_INFO("       CONFIG_STAT1 = 0x%08x\n",
315                  MACH64_READ(MACH64_CONFIG_STAT1));
316         DRM_INFO("       CONFIG_STAT2 = 0x%08x\n",
317                  MACH64_READ(MACH64_CONFIG_STAT2));
318         DRM_INFO("            CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG));
319         DRM_INFO("  CUSTOM_MACRO_CNTL = 0x%08x\n",
320                  MACH64_READ(MACH64_CUSTOM_MACRO_CNTL));
321         DRM_INFO("\n");
322         /* DRM_INFO( "           DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */
323         /* DRM_INFO( "           DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */
324         DRM_INFO("        DP_BKGD_CLR = 0x%08x\n",
325                  MACH64_READ(MACH64_DP_BKGD_CLR));
326         DRM_INFO("        DP_FRGD_CLR = 0x%08x\n",
327                  MACH64_READ(MACH64_DP_FRGD_CLR));
328         DRM_INFO("             DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX));
329         DRM_INFO("       DP_PIX_WIDTH = 0x%08x\n",
330                  MACH64_READ(MACH64_DP_PIX_WIDTH));
331         DRM_INFO("             DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC));
332         DRM_INFO("      DP_WRITE_MASK = 0x%08x\n",
333                  MACH64_READ(MACH64_DP_WRITE_MASK));
334         DRM_INFO("         DSP_CONFIG = 0x%08x\n",
335                  MACH64_READ(MACH64_DSP_CONFIG));
336         DRM_INFO("         DSP_ON_OFF = 0x%08x\n",
337                  MACH64_READ(MACH64_DSP_ON_OFF));
338         DRM_INFO("           DST_CNTL = 0x%08x\n",
339                  MACH64_READ(MACH64_DST_CNTL));
340         DRM_INFO("      DST_OFF_PITCH = 0x%08x\n",
341                  MACH64_READ(MACH64_DST_OFF_PITCH));
342         DRM_INFO("\n");
343         /* DRM_INFO( "       EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */
344         DRM_INFO("       EXT_MEM_CNTL = 0x%08x\n",
345                  MACH64_READ(MACH64_EXT_MEM_CNTL));
346         DRM_INFO("\n");
347         DRM_INFO("          FIFO_STAT = 0x%08x\n",
348                  MACH64_READ(MACH64_FIFO_STAT));
349         DRM_INFO("\n");
350         DRM_INFO("      GEN_TEST_CNTL = 0x%08x\n",
351                  MACH64_READ(MACH64_GEN_TEST_CNTL));
352         /* DRM_INFO( "              GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */
353         DRM_INFO("   GUI_CMDFIFO_DATA = 0x%08x\n",
354                  MACH64_READ(MACH64_GUI_CMDFIFO_DATA));
355         DRM_INFO("  GUI_CMDFIFO_DEBUG = 0x%08x\n",
356                  MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG));
357         DRM_INFO("           GUI_CNTL = 0x%08x\n",
358                  MACH64_READ(MACH64_GUI_CNTL));
359         DRM_INFO("           GUI_STAT = 0x%08x\n",
360                  MACH64_READ(MACH64_GUI_STAT));
361         DRM_INFO("      GUI_TRAJ_CNTL = 0x%08x\n",
362                  MACH64_READ(MACH64_GUI_TRAJ_CNTL));
363         DRM_INFO("\n");
364         DRM_INFO("          HOST_CNTL = 0x%08x\n",
365                  MACH64_READ(MACH64_HOST_CNTL));
366         DRM_INFO("           HW_DEBUG = 0x%08x\n",
367                  MACH64_READ(MACH64_HW_DEBUG));
368         DRM_INFO("\n");
369         DRM_INFO("    MEM_ADDR_CONFIG = 0x%08x\n",
370                  MACH64_READ(MACH64_MEM_ADDR_CONFIG));
371         DRM_INFO("       MEM_BUF_CNTL = 0x%08x\n",
372                  MACH64_READ(MACH64_MEM_BUF_CNTL));
373         DRM_INFO("\n");
374         DRM_INFO("           PAT_REG0 = 0x%08x\n",
375                  MACH64_READ(MACH64_PAT_REG0));
376         DRM_INFO("           PAT_REG1 = 0x%08x\n",
377                  MACH64_READ(MACH64_PAT_REG1));
378         DRM_INFO("\n");
379         DRM_INFO("            SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT));
380         DRM_INFO("           SC_RIGHT = 0x%08x\n",
381                  MACH64_READ(MACH64_SC_RIGHT));
382         DRM_INFO("             SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP));
383         DRM_INFO("          SC_BOTTOM = 0x%08x\n",
384                  MACH64_READ(MACH64_SC_BOTTOM));
385         DRM_INFO("\n");
386         DRM_INFO("      SCALE_3D_CNTL = 0x%08x\n",
387                  MACH64_READ(MACH64_SCALE_3D_CNTL));
388         DRM_INFO("       SCRATCH_REG0 = 0x%08x\n",
389                  MACH64_READ(MACH64_SCRATCH_REG0));
390         DRM_INFO("       SCRATCH_REG1 = 0x%08x\n",
391                  MACH64_READ(MACH64_SCRATCH_REG1));
392         DRM_INFO("         SETUP_CNTL = 0x%08x\n",
393                  MACH64_READ(MACH64_SETUP_CNTL));
394         DRM_INFO("           SRC_CNTL = 0x%08x\n",
395                  MACH64_READ(MACH64_SRC_CNTL));
396         DRM_INFO("\n");
397         DRM_INFO("           TEX_CNTL = 0x%08x\n",
398                  MACH64_READ(MACH64_TEX_CNTL));
399         DRM_INFO("     TEX_SIZE_PITCH = 0x%08x\n",
400                  MACH64_READ(MACH64_TEX_SIZE_PITCH));
401         DRM_INFO("       TIMER_CONFIG = 0x%08x\n",
402                  MACH64_READ(MACH64_TIMER_CONFIG));
403         DRM_INFO("\n");
404         DRM_INFO("             Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL));
405         DRM_INFO("        Z_OFF_PITCH = 0x%08x\n",
406                  MACH64_READ(MACH64_Z_OFF_PITCH));
407         DRM_INFO("\n");
408 }
409
410 #define MACH64_DUMP_CONTEXT     3
411
412 /**
413  * Used by mach64_dump_ring_info() to dump the contents of the current buffer
414  * pointed to by the ring head.
415  */
416 static void mach64_dump_buf_info(drm_mach64_private_t *dev_priv,
417                                  struct drm_buf *buf)
418 {
419         u32 addr = GETBUFADDR(buf);
420         u32 used = buf->used >> 2;
421         u32 sys_addr = MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR);
422         u32 *p = GETBUFPTR(buf);
423         int skipped = 0;
424
425         DRM_INFO("buffer contents:\n");
426
427         while (used) {
428                 u32 reg, count;
429
430                 reg = le32_to_cpu(*p++);
431                 if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
432                     (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
433                      addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
434                     addr >=
435                     GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) {
436                         DRM_INFO("%08x:  0x%08x\n", addr, reg);
437                 }
438                 addr += 4;
439                 used--;
440
441                 count = (reg >> 16) + 1;
442                 reg = reg & 0xffff;
443                 reg = MMSELECT(reg);
444                 while (count && used) {
445                         if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
446                             (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
447                              addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
448                             addr >=
449                             GETBUFADDR(buf) + buf->used -
450                             MACH64_DUMP_CONTEXT * 4) {
451                                 DRM_INFO("%08x:    0x%04x = 0x%08x\n", addr,
452                                          reg, le32_to_cpu(*p));
453                                 skipped = 0;
454                         } else {
455                                 if (!skipped) {
456                                         DRM_INFO("  ...\n");
457                                         skipped = 1;
458                                 }
459                         }
460                         p++;
461                         addr += 4;
462                         used--;
463
464                         reg += 4;
465                         count--;
466                 }
467         }
468
469         DRM_INFO("\n");
470 }
471
472 /**
473  * Dump the ring state and contents, including the contents of the buffer being
474  * processed by the graphics engine.
475  */
476 void mach64_dump_ring_info(drm_mach64_private_t *dev_priv)
477 {
478         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
479         int i, skipped;
480
481         DRM_INFO("\n");
482
483         DRM_INFO("ring contents:\n");
484         DRM_INFO("  head_addr: 0x%08x head: %u tail: %u\n\n",
485                  ring->head_addr, ring->head, ring->tail);
486
487         skipped = 0;
488         for (i = 0; i < ring->size / sizeof(u32); i += 4) {
489                 if (i <= MACH64_DUMP_CONTEXT * 4 ||
490                     i >= ring->size / sizeof(u32) - MACH64_DUMP_CONTEXT * 4 ||
491                     (i >= ring->tail - MACH64_DUMP_CONTEXT * 4 &&
492                      i <= ring->tail + MACH64_DUMP_CONTEXT * 4) ||
493                     (i >= ring->head - MACH64_DUMP_CONTEXT * 4 &&
494                      i <= ring->head + MACH64_DUMP_CONTEXT * 4)) {
495                         DRM_INFO("  0x%08x:  0x%08x 0x%08x 0x%08x 0x%08x%s%s\n",
496                                  (u32)(ring->start_addr + i * sizeof(u32)),
497                                  le32_to_cpu(((u32 *) ring->start)[i + 0]),
498                                  le32_to_cpu(((u32 *) ring->start)[i + 1]),
499                                  le32_to_cpu(((u32 *) ring->start)[i + 2]),
500                                  le32_to_cpu(((u32 *) ring->start)[i + 3]),
501                                  i == ring->head ? " (head)" : "",
502                                  i == ring->tail ? " (tail)" : "");
503                         skipped = 0;
504                 } else {
505                         if (!skipped) {
506                                 DRM_INFO("  ...\n");
507                                 skipped = 1;
508                         }
509                 }
510         }
511
512         DRM_INFO("\n");
513
514         if (ring->head >= 0 && ring->head < ring->size / sizeof(u32)) {
515                 struct list_head *ptr;
516                 u32 addr = le32_to_cpu(((u32 *) ring->start)[ring->head + 1]);
517
518                 list_for_each(ptr, &dev_priv->pending) {
519                         drm_mach64_freelist_t *entry =
520                             list_entry(ptr, drm_mach64_freelist_t, list);
521                         struct drm_buf *buf = entry->buf;
522
523                         u32 buf_addr = GETBUFADDR(buf);
524
525                         if (buf_addr <= addr && addr < buf_addr + buf->used)
526                                 mach64_dump_buf_info(dev_priv, buf);
527                 }
528         }
529
530         DRM_INFO("\n");
531         DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
532                  MACH64_READ(MACH64_BM_GUI_TABLE));
533         DRM_INFO("\n");
534         DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
535                  MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
536         DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
537                  MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
538         DRM_INFO("         BM_COMMAND = 0x%08x\n",
539                  MACH64_READ(MACH64_BM_COMMAND));
540         DRM_INFO("\n");
541         DRM_INFO("          BM_STATUS = 0x%08x\n",
542                  MACH64_READ(MACH64_BM_STATUS));
543         DRM_INFO("           BUS_CNTL = 0x%08x\n",
544                  MACH64_READ(MACH64_BUS_CNTL));
545         DRM_INFO("          FIFO_STAT = 0x%08x\n",
546                  MACH64_READ(MACH64_FIFO_STAT));
547         DRM_INFO("           GUI_STAT = 0x%08x\n",
548                  MACH64_READ(MACH64_GUI_STAT));
549         DRM_INFO("           SRC_CNTL = 0x%08x\n",
550                  MACH64_READ(MACH64_SRC_CNTL));
551 }
552
553 /*@}*/
554
555
556 /*******************************************************************/
557 /** \name DMA descriptor ring macros */
558 /*@{*/
559
560 /**
561  * Add the end mark to the ring's new tail position.
562  *
563  * The bus master engine will keep processing the DMA buffers listed in the
564  * ring until it finds this mark, at which point it stops.
565  *
566  * \sa mach64_clear_dma_eol
567  */ 
568 static __inline__ void mach64_set_dma_eol(volatile u32 *addr)
569 {
570 #if defined(__i386__)
571         int nr = 31;
572
573         /* Taken from include/asm-i386/bitops.h linux header */
574         __asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr)
575                              :"Ir"(nr));
576 #elif defined(__powerpc__)
577         u32 old;
578         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
579
580         /* Taken from the include/asm-ppc/bitops.h linux header */
581         __asm__ __volatile__("\n\
582 1:      lwarx   %0,0,%3 \n\
583         or      %0,%0,%2 \n\
584         stwcx.  %0,0,%3 \n\
585         bne-    1b":"=&r"(old), "=m"(*addr)
586                              :"r"(mask), "r"(addr), "m"(*addr)
587                              :"cc");
588 #elif defined(__alpha__)
589         u32 temp;
590         u32 mask = MACH64_DMA_EOL;
591
592         /* Taken from the include/asm-alpha/bitops.h linux header */
593         __asm__ __volatile__("1:        ldl_l %0,%3\n"
594                              "  bis %0,%2,%0\n"
595                              "  stl_c %0,%1\n"
596                              "  beq %0,2f\n"
597                              ".subsection 2\n"
598                              "2:        br 1b\n"
599                              ".previous":"=&r"(temp), "=m"(*addr)
600                              :"Ir"(mask), "m"(*addr));
601 #else
602         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
603
604         *addr |= mask;
605 #endif
606 }
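/*
 * Portability note (illustrative sketch, not part of the driver): with a
 * C11-capable compiler the per-architecture assembly above could be written
 * as a single atomic OR of the little-endian EOL bit, e.g.
 *
 *      #include <stdatomic.h>
 *
 *      static inline void set_dma_eol_c11(volatile u32 *addr)
 *      {
 *              atomic_fetch_or_explicit((_Atomic u32 *)addr,
 *                                       cpu_to_le32(MACH64_DMA_EOL),
 *                                       memory_order_relaxed);
 *      }
 *
 * This driver predates C11, hence the hand-rolled bitops borrowed from the
 * Linux asm headers.
 */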
607
608 /**
609  * Remove the end mark from the ring's old tail position.
610  *
611  * It should be called after calling mach64_set_dma_eol to mark the ring's new
612  * tail position.
613  *
614  * We update the end marks while the bus master engine is in operation. Since
615  * the bus master engine may potentially be reading from the same position
616  * that we are writing, we must make the change atomically to avoid
617  * intermediate bad data.
618  */
619 static __inline__ void mach64_clear_dma_eol(volatile u32 *addr)
620 {
621 #if defined(__i386__)
622         int nr = 31;
623
624         /* Taken from include/asm-i386/bitops.h linux header */
625         __asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
626                              :"Ir"(nr));
627 #elif defined(__powerpc__)
628         u32 old;
629         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
630
631         /* Taken from the include/asm-ppc/bitops.h linux header */
632         __asm__ __volatile__("\n\
633 1:      lwarx   %0,0,%3 \n\
634         andc    %0,%0,%2 \n\
635         stwcx.  %0,0,%3 \n\
636         bne-    1b":"=&r"(old), "=m"(*addr)
637                              :"r"(mask), "r"(addr), "m"(*addr)
638                              :"cc");
639 #elif defined(__alpha__)
640         u32 temp;
641         u32 mask = ~MACH64_DMA_EOL;
642
643         /* Taken from the include/asm-alpha/bitops.h linux header */
644         __asm__ __volatile__("1:        ldl_l %0,%3\n"
645                              "  and %0,%2,%0\n"
646                              "  stl_c %0,%1\n"
647                              "  beq %0,2f\n"
648                              ".subsection 2\n"
649                              "2:        br 1b\n"
650                              ".previous":"=&r"(temp), "=m"(*addr)
651                              :"Ir"(mask), "m"(*addr));
652 #else
653         u32 mask = cpu_to_le32(~MACH64_DMA_EOL);
654
655         *addr &= mask;
656 #endif
657 }
658
659 #define RING_LOCALS                                                     \
660         int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring
661
662 #define RING_WRITE_OFS  _ring_write
663
664 #define BEGIN_RING(n)                                                   \
665         do {                                                            \
666                 if (MACH64_VERBOSE) {                                   \
667                         DRM_INFO( "BEGIN_RING( %d ) \n",                \
668                                   (n) );                                \
669                 }                                                       \
670                 if (dev_priv->ring.space <= (n) * sizeof(u32)) {        \
671                         int ret;                                        \
672                         if ((ret = mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \
673                                 DRM_ERROR( "wait_ring failed, resetting engine\n"); \
674                                 mach64_dump_engine_info( dev_priv );    \
675                                 mach64_do_engine_reset( dev_priv );     \
676                                 return ret;                             \
677                         }                                               \
678                 }                                                       \
679                 dev_priv->ring.space -= (n) * sizeof(u32);              \
680                 _ring = (u32 *) dev_priv->ring.start;                   \
681                 _ring_tail = _ring_write = dev_priv->ring.tail;         \
682                 _ring_mask = dev_priv->ring.tail_mask;                  \
683         } while (0)
684
685 #define OUT_RING( x )                                           \
686 do {                                                            \
687         if (MACH64_VERBOSE) {                                   \
688                 DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",    \
689                            (unsigned int)(x), _ring_write );    \
690         }                                                       \
691         _ring[_ring_write++] = cpu_to_le32( x );                \
692         _ring_write &= _ring_mask;                              \
693 } while (0)
694
695 #define ADVANCE_RING()                                                  \
696 do {                                                                    \
697         if (MACH64_VERBOSE) {                                           \
698                 DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",     \
699                           _ring_write, _ring_tail );                    \
700         }                                                               \
701         DRM_MEMORYBARRIER();                                            \
702         mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] );  \
703         DRM_MEMORYBARRIER();                                            \
704         dev_priv->ring.tail = _ring_write;                              \
705         mach64_ring_tick( dev_priv, &(dev_priv)->ring );                \
706 } while (0)
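/*
 * Usage sketch (illustrative only): descriptors are emitted in 4-dword
 * groups between a BEGIN_RING()/ADVANCE_RING() pair, from a function that
 * has dev_priv in scope and returns int (BEGIN_RING() returns on a ring
 * timeout):
 *
 *      RING_LOCALS;
 *
 *      BEGIN_RING(4);
 *      OUT_RING(MACH64_APERTURE_OFFSET + MACH64_BM_ADDR);
 *      OUT_RING(page);
 *      OUT_RING(MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
 *      OUT_RING(0);
 *      ADVANCE_RING();
 *
 * where "page" is the bus address of the chunk to transfer.
 * mach64_add_buf_to_ring() below is the real version of this pattern.
 */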
707
708 /**
709  * Queue a DMA buffer of register writes into the ring buffer.
710  */
711 int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
712                            drm_mach64_freelist_t *entry)
713 {
714         int bytes, pages, remainder;
715         u32 address, page;
716         int i;
717         struct drm_buf *buf = entry->buf;
718         RING_LOCALS;
719
720         bytes = buf->used;
721         address = GETBUFADDR( buf );
722         pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
723
724         BEGIN_RING( pages * 4 );
725
726         for ( i = 0 ; i < pages-1 ; i++ ) {
727                 page = address + i * MACH64_DMA_CHUNKSIZE;
728                 OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
729                 OUT_RING( page );
730                 OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
731                 OUT_RING( 0 );
732         }
733
734         /* generate the final descriptor for any remaining commands in this buffer */
735         page = address + i * MACH64_DMA_CHUNKSIZE;
736         remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
737
738         /* Save dword offset of last descriptor for this buffer.
739          * This is needed to check for completion of the buffer in freelist_get
740          */
741         entry->ring_ofs = RING_WRITE_OFS;
742
743         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
744         OUT_RING( page );
745         OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
746         OUT_RING( 0 );
747
748         ADVANCE_RING();
749         
750         return 0;
751 }
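/*
 * Worked example (assuming the usual 4 KB MACH64_DMA_CHUNKSIZE): a buffer
 * with buf->used == 10240 bytes gives pages = (10240 + 4095) / 4096 = 3
 * descriptors; the first two each cover 4096 bytes and the last carries the
 * remaining 2048 bytes together with the MACH64_DMA_EOL mark.
 */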
752
753 /**
754  * Queue a DMA buffer controlling host data transfers (e.g., blit).
755  * 
756  * Almost identical to mach64_add_buf_to_ring.
757  */
758 int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
759                                     drm_mach64_freelist_t *entry)
760 {
761         int bytes, pages, remainder;
762         u32 address, page;
763         int i;
764         struct drm_buf *buf = entry->buf;
765         RING_LOCALS;
766         
767         bytes = buf->used - MACH64_HOSTDATA_BLIT_OFFSET;
768         pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
769         address = GETBUFADDR( buf );
770         
771         BEGIN_RING( 4 + pages * 4 );
772         
773         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
774         OUT_RING( address );
775         OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET );
776         OUT_RING( 0 );
777         address += MACH64_HOSTDATA_BLIT_OFFSET;
778         
779         for ( i = 0 ; i < pages-1 ; i++ ) {
780                 page = address + i * MACH64_DMA_CHUNKSIZE;
781                 OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
782                 OUT_RING( page );
783                 OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
784                 OUT_RING( 0 );
785         }
786         
787         /* generate the final descriptor for any remaining commands in this buffer */
788         page = address + i * MACH64_DMA_CHUNKSIZE;
789         remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
790         
791         /* Save dword offset of last descriptor for this buffer.
792          * This is needed to check for completion of the buffer in freelist_get
793          */
794         entry->ring_ofs = RING_WRITE_OFS;
795         
796         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
797         OUT_RING( page );
798         OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
799         OUT_RING( 0 );
800         
801         ADVANCE_RING();
802         
803         return 0;
804 }
805
806 /*@}*/
807
808
809 /*******************************************************************/
810 /** \name DMA test and initialization */
811 /*@{*/
812
813 /**
814  * Perform a simple DMA operation using the pattern registers to test whether
815  * DMA works.
816  *
817  * \return zero if successful.
818  *
819  * \note This function was the testbed for many experiments regarding Mach64
820  * DMA operation. It is left here since it is so tricky to get DMA operating
821  * properly on some architectures and hardware.
822  */
823 static int mach64_bm_dma_test(struct drm_device * dev)
824 {
825         drm_mach64_private_t *dev_priv = dev->dev_private;
826         drm_dma_handle_t *cpu_addr_dmah;
827         u32 data_addr;
828         u32 *table, *data;
829         u32 expected[2];
830         u32 src_cntl, pat_reg0, pat_reg1;
831         int i, count, failed;
832
833         DRM_DEBUG("\n");
834
835         table = (u32 *) dev_priv->ring.start;
836
837         /* FIXME: get a dma buffer from the freelist here */
838         DRM_DEBUG("Allocating data memory ...\n");
839         cpu_addr_dmah =
840             drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful);
841         if (!cpu_addr_dmah) {
842                 DRM_INFO("data-memory allocation failed!\n");
843                 return -ENOMEM;
844         } else {
845                 data = (u32 *) cpu_addr_dmah->vaddr;
846                 data_addr = (u32) cpu_addr_dmah->busaddr;
847         }
848
849         /* Save the X server's value for SRC_CNTL and restore it
850          * in case our test fails.  This prevents the X server
851          * from disabling its cache for this register
852          */
853         src_cntl = MACH64_READ(MACH64_SRC_CNTL);
854         pat_reg0 = MACH64_READ(MACH64_PAT_REG0);
855         pat_reg1 = MACH64_READ(MACH64_PAT_REG1);
856
857         mach64_do_wait_for_fifo(dev_priv, 3);
858
859         MACH64_WRITE(MACH64_SRC_CNTL, 0);
860         MACH64_WRITE(MACH64_PAT_REG0, 0x11111111);
861         MACH64_WRITE(MACH64_PAT_REG1, 0x11111111);
862
863         mach64_do_wait_for_idle(dev_priv);
864
865         for (i = 0; i < 2; i++) {
866                 u32 reg;
867                 reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
868                 DRM_DEBUG("(Before DMA Transfer) reg %d = 0x%08x\n", i, reg);
869                 if (reg != 0x11111111) {
870                         DRM_INFO("Error initializing test registers\n");
871                         DRM_INFO("resetting engine ...\n");
872                         mach64_do_engine_reset(dev_priv);
873                         DRM_INFO("freeing data buffer memory.\n");
874                         drm_pci_free(dev, cpu_addr_dmah);
875                         return -EIO;
876                 }
877         }
878
879         /* fill up a buffer with sets of 2 consecutive writes starting with PAT_REG0 */
880         count = 0;
881
882         data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
883         data[count++] = expected[0] = 0x22222222;
884         data[count++] = expected[1] = 0xaaaaaaaa;
885
886         while (count < 1020) {
887                 data[count++] =
888                     cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
889                 data[count++] = 0x22222222;
890                 data[count++] = 0xaaaaaaaa;
891         }
892         data[count++] = cpu_to_le32(DMAREG(MACH64_SRC_CNTL) | (0 << 16));
893         data[count++] = 0;
894
895         DRM_DEBUG("Preparing table ...\n");
896         table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(MACH64_BM_ADDR +
897                                                          MACH64_APERTURE_OFFSET);
898         table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(data_addr);
899         table[MACH64_DMA_COMMAND] = cpu_to_le32(count * sizeof(u32)
900                                                 | MACH64_DMA_HOLD_OFFSET
901                                                 | MACH64_DMA_EOL);
902         table[MACH64_DMA_RESERVED] = 0;
903
904         DRM_DEBUG("table[0] = 0x%08x\n", table[0]);
905         DRM_DEBUG("table[1] = 0x%08x\n", table[1]);
906         DRM_DEBUG("table[2] = 0x%08x\n", table[2]);
907         DRM_DEBUG("table[3] = 0x%08x\n", table[3]);
908
909         for (i = 0; i < 6; i++) {
910                 DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
911         }
912         DRM_DEBUG(" ...\n");
913         for (i = count - 5; i < count; i++) {
914                 DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
915         }
916
917         DRM_MEMORYBARRIER();
918
919         DRM_DEBUG("waiting for idle...\n");
920         if ((i = mach64_do_wait_for_idle(dev_priv))) {
921                 DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
922                 DRM_INFO("resetting engine ...\n");
923                 mach64_do_engine_reset(dev_priv);
924                 mach64_do_wait_for_fifo(dev_priv, 3);
925                 MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
926                 MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
927                 MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
928                 DRM_INFO("freeing data buffer memory.\n");
929                 drm_pci_free(dev, cpu_addr_dmah);
930                 return i;
931         }
932         DRM_DEBUG("waiting for idle...done\n");
933
934         DRM_DEBUG("BUS_CNTL = 0x%08x\n", MACH64_READ(MACH64_BUS_CNTL));
935         DRM_DEBUG("SRC_CNTL = 0x%08x\n", MACH64_READ(MACH64_SRC_CNTL));
936         DRM_DEBUG("\n");
937         DRM_DEBUG("data bus addr = 0x%08x\n", data_addr);
938         DRM_DEBUG("table bus addr = 0x%08x\n", dev_priv->ring.start_addr);
939
940         DRM_DEBUG("starting DMA transfer...\n");
941         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
942                      dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
943
944         MACH64_WRITE(MACH64_SRC_CNTL,
945                      MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
946                      MACH64_SRC_BM_OP_SYSTEM_TO_REG);
947
948         /* Kick off the transfer */
949         DRM_DEBUG("starting DMA transfer... done.\n");
950         MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
951
952         DRM_DEBUG("waiting for idle...\n");
953
954         if ((i = mach64_do_wait_for_idle(dev_priv))) {
955                 /* engine locked up, dump register state and reset */
956                 DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
957                 mach64_dump_engine_info(dev_priv);
958                 DRM_INFO("resetting engine ...\n");
959                 mach64_do_engine_reset(dev_priv);
960                 mach64_do_wait_for_fifo(dev_priv, 3);
961                 MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
962                 MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
963                 MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
964                 DRM_INFO("freeing data buffer memory.\n");
965                 drm_pci_free(dev, cpu_addr_dmah);
966                 return i;
967         }
968
969         DRM_DEBUG("waiting for idle...done\n");
970
971         /* restore SRC_CNTL */
972         mach64_do_wait_for_fifo(dev_priv, 1);
973         MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
974
975         failed = 0;
976
977         /* Check register values to see if the GUI master operation succeeded */
978         for (i = 0; i < 2; i++) {
979                 u32 reg;
980                 reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
981                 DRM_DEBUG("(After DMA Transfer) reg %d = 0x%08x\n", i, reg);
982                 if (reg != expected[i]) {
983                         failed = -1;
984                 }
985         }
986
987         /* restore pattern registers */
988         mach64_do_wait_for_fifo(dev_priv, 2);
989         MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
990         MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
991
992         DRM_DEBUG("freeing data buffer memory.\n");
993         drm_pci_free(dev, cpu_addr_dmah);
994         DRM_DEBUG("returning ...\n");
995
996         return failed;
997 }
998
999 /**
1000  * Called during the DMA initialization ioctl to initialize all the necessary
1001  * software and hardware state for DMA operation.
1002  */
1003 static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)
1004 {
1005         drm_mach64_private_t *dev_priv;
1006         u32 tmp;
1007         int i, ret;
1008
1009         DRM_DEBUG("\n");
1010
1011         dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER);
1012         if (dev_priv == NULL)
1013                 return -ENOMEM;
1014
1015         memset(dev_priv, 0, sizeof(drm_mach64_private_t));
1016
1017         dev_priv->is_pci = init->is_pci;
1018
1019         dev_priv->fb_bpp = init->fb_bpp;
1020         dev_priv->front_offset = init->front_offset;
1021         dev_priv->front_pitch = init->front_pitch;
1022         dev_priv->back_offset = init->back_offset;
1023         dev_priv->back_pitch = init->back_pitch;
1024
1025         dev_priv->depth_bpp = init->depth_bpp;
1026         dev_priv->depth_offset = init->depth_offset;
1027         dev_priv->depth_pitch = init->depth_pitch;
1028
1029         dev_priv->front_offset_pitch = (((dev_priv->front_pitch / 8) << 22) |
1030                                         (dev_priv->front_offset >> 3));
1031         dev_priv->back_offset_pitch = (((dev_priv->back_pitch / 8) << 22) |
1032                                        (dev_priv->back_offset >> 3));
1033         dev_priv->depth_offset_pitch = (((dev_priv->depth_pitch / 8) << 22) |
1034                                         (dev_priv->depth_offset >> 3));
1035
1036         dev_priv->usec_timeout = 1000000;
1037
1038         /* Set up the freelist, placeholder list and pending list */
1039         INIT_LIST_HEAD(&dev_priv->free_list);
1040         INIT_LIST_HEAD(&dev_priv->placeholders);
1041         INIT_LIST_HEAD(&dev_priv->pending);
1042
1043         dev_priv->sarea = drm_getsarea(dev);
1044         if (!dev_priv->sarea) {
1045                 DRM_ERROR("can not find sarea!\n");
1046                 dev->dev_private = (void *)dev_priv;
1047                 mach64_do_cleanup_dma(dev);
1048                 return -EINVAL;
1049         }
1050         dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
1051         if (!dev_priv->fb) {
1052                 DRM_ERROR("can not find frame buffer map!\n");
1053                 dev->dev_private = (void *)dev_priv;
1054                 mach64_do_cleanup_dma(dev);
1055                 return -EINVAL;
1056         }
1057         dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
1058         if (!dev_priv->mmio) {
1059                 DRM_ERROR("can not find mmio map!\n");
1060                 dev->dev_private = (void *)dev_priv;
1061                 mach64_do_cleanup_dma(dev);
1062                 return -EINVAL;
1063         }
1064
1065         dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
1066         if (!dev_priv->ring_map) {
1067                 DRM_ERROR("can not find ring map!\n");
1068                 dev->dev_private = (void *)dev_priv;
1069                 mach64_do_cleanup_dma(dev);
1070                 return -EINVAL;
1071         }
1072
1073         dev_priv->sarea_priv = (drm_mach64_sarea_t *)
1074             ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
1075
1076         if (!dev_priv->is_pci) {
1077                 drm_core_ioremap(dev_priv->ring_map, dev);
1078                 if (!dev_priv->ring_map->handle) {
1079                         DRM_ERROR("can not ioremap virtual address for"
1080                                   " descriptor ring\n");
1081                         dev->dev_private = (void *)dev_priv;
1082                         mach64_do_cleanup_dma(dev);
1083                         return -ENOMEM;
1084                 }
1085                 dev->agp_buffer_token = init->buffers_offset;
1086                 dev->agp_buffer_map =
1087                     drm_core_findmap(dev, init->buffers_offset);
1088                 if (!dev->agp_buffer_map) {
1089                         DRM_ERROR("can not find dma buffer map!\n");
1090                         dev->dev_private = (void *)dev_priv;
1091                         mach64_do_cleanup_dma(dev);
1092                         return -EINVAL;
1093                 }
1094                 /* there might be a nicer way to do this -
1095                    dev isn't passed all the way through the mach64 - DA */
1096                 dev_priv->dev_buffers = dev->agp_buffer_map;
1097
1098                 drm_core_ioremap(dev->agp_buffer_map, dev);
1099                 if (!dev->agp_buffer_map->handle) {
1100                         DRM_ERROR("can not ioremap virtual address for"
1101                                   " dma buffer\n");
1102                         dev->dev_private = (void *)dev_priv;
1103                         mach64_do_cleanup_dma(dev);
1104                         return -ENOMEM;
1105                 }
1106                 dev_priv->agp_textures =
1107                     drm_core_findmap(dev, init->agp_textures_offset);
1108                 if (!dev_priv->agp_textures) {
1109                         DRM_ERROR("can not find agp texture region!\n");
1110                         dev->dev_private = (void *)dev_priv;
1111                         mach64_do_cleanup_dma(dev);
1112                         return -EINVAL;
1113                 }
1114         }
1115
1116         dev->dev_private = (void *)dev_priv;
1117
1118         dev_priv->driver_mode = init->dma_mode;
1119
1120         /* changing the FIFO size from the default causes problems with DMA */
1121         tmp = MACH64_READ(MACH64_GUI_CNTL);
1122         if ((tmp & MACH64_CMDFIFO_SIZE_MASK) != MACH64_CMDFIFO_SIZE_128) {
1123                 DRM_INFO("Setting FIFO size to 128 entries\n");
1124                 /* FIFO must be empty to change the FIFO depth */
1125                 if ((ret = mach64_do_wait_for_idle(dev_priv))) {
1126                         DRM_ERROR
1127                             ("wait for idle failed before changing FIFO depth!\n");
1128                         mach64_do_cleanup_dma(dev);
1129                         return ret;
1130                 }
1131                 MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK)
1132                                                | MACH64_CMDFIFO_SIZE_128));
1133                 /* need to read GUI_STAT for proper sync according to docs */
1134                 if ((ret = mach64_do_wait_for_idle(dev_priv))) {
1135                         DRM_ERROR
1136                             ("wait for idle failed when changing FIFO depth!\n");
1137                         mach64_do_cleanup_dma(dev);
1138                         return ret;
1139                 }
1140         }
1141
1142         dev_priv->ring.size = 0x4000;   /* 16KB */
1143         dev_priv->ring.start = dev_priv->ring_map->handle;
1144         dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
1145
1146         memset(dev_priv->ring.start, 0, dev_priv->ring.size);
1147         DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n",
1148                  dev_priv->ring.start, dev_priv->ring.start_addr);
1149
1150         ret = 0;
1151         if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
1152
1153                 /* enable block 1 registers and bus mastering */
1154                 MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL)
1155                                                 | MACH64_BUS_EXT_REG_EN)
1156                                                & ~MACH64_BUS_MASTER_DIS));
1157
1158                 /* try a DMA GUI-mastering pass and fall back to MMIO if it fails */
1159                 DRM_DEBUG("Starting DMA test...\n");
1160                 if ((ret = mach64_bm_dma_test(dev))) {
1161                         dev_priv->driver_mode = MACH64_MODE_MMIO;
1162                 }
1163         }
1164
1165         switch (dev_priv->driver_mode) {
1166         case MACH64_MODE_MMIO:
1167                 MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL)
1168                                                | MACH64_BUS_EXT_REG_EN
1169                                                | MACH64_BUS_MASTER_DIS));
1170                 if (init->dma_mode == MACH64_MODE_MMIO)
1171                         DRM_INFO("Forcing pseudo-DMA mode\n");
1172                 else
1173                         DRM_INFO
1174                             ("DMA test failed (ret=%d), using pseudo-DMA mode\n",
1175                              ret);
1176                 break;
1177         case MACH64_MODE_DMA_SYNC:
1178                 DRM_INFO("DMA test succeeded, using synchronous DMA mode\n");
1179                 break;
1180         case MACH64_MODE_DMA_ASYNC:
1181         default:
1182                 DRM_INFO("DMA test succeeded, using asynchronous DMA mode\n");
1183         }
1184
1185         dev_priv->ring_running = 0;
1186
1187         /* setup offsets for physical address of table start and end */
1188         dev_priv->ring.head_addr = dev_priv->ring.start_addr;
1189         dev_priv->ring.head = dev_priv->ring.tail = 0;
1190         dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
1191         dev_priv->ring.space = dev_priv->ring.size;
1192
1193         /* setup physical address and size of descriptor table */
1194         mach64_do_wait_for_fifo(dev_priv, 1);
1195         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
1196                      (dev_priv->ring.
1197                       head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB));
1198
1199         /* init frame counter */
1200         dev_priv->sarea_priv->frames_queued = 0;
1201         for (i = 0; i < MACH64_MAX_QUEUED_FRAMES; i++) {
1202                 dev_priv->frame_ofs[i] = ~0;    /* All ones indicates placeholder */
1203         }
1204
1205         /* Allocate the DMA buffer freelist */
1206         if ((ret = mach64_init_freelist(dev))) {
1207                 DRM_ERROR("Freelist allocation failed\n");
1208                 mach64_do_cleanup_dma(dev);
1209                 return ret;
1210         }
1211
1212         return 0;
1213 }
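/*
 * Worked example (illustrative values) for the *_offset_pitch packing in
 * mach64_do_dma_init() above: with front_pitch == 1024 and front_offset == 0,
 * front_offset_pitch = ((1024 / 8) << 22) | (0 >> 3) = 128 << 22 = 0x20000000,
 * i.e. the pitch divided by eight in the upper bits and the offset divided by
 * eight in the lower bits.
 */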
1214
1215 /*******************************************************************/
1216 /** MMIO Pseudo-DMA (intended primarily for debugging, not performance)
1217  */
1218
1219 int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t *dev_priv)
1220 {
1221         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
1222         volatile u32 *ring_read;
1223         struct list_head *ptr;
1224         drm_mach64_freelist_t *entry;
1225         struct drm_buf *buf = NULL;
1226         u32 *buf_ptr;
1227         u32 used, reg, target;
1228         int fifo, count, found, ret, no_idle_wait;
1229
1230         fifo = count = reg = no_idle_wait = 0;
1231         target = MACH64_BM_ADDR;
1232
1233         if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
1234                 DRM_INFO("idle failed before pseudo-dma dispatch, resetting engine\n");
1235                 mach64_dump_engine_info(dev_priv);
1236                 mach64_do_engine_reset(dev_priv);
1237                 return ret;
1238         }
1239
1240         ring_read = (u32 *) ring->start;
1241
1242         while (ring->tail != ring->head) {
1243                 u32 buf_addr, new_target, offset;
1244                 u32 bytes, remaining, head, eol;
1245
1246                 head = ring->head;
1247
1248                 new_target =
1249                     le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET;
1250                 buf_addr = le32_to_cpu(ring_read[head++]);
1251                 eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL;
1252                 bytes = le32_to_cpu(ring_read[head++])
1253                     & ~(MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
1254                 head++;
1255                 head &= ring->tail_mask;
1256
1257                 /* can't wait for idle between a blit setup descriptor
1258                  * and a HOSTDATA descriptor or the engine will lock
1259                  */
1260                 if (new_target == MACH64_BM_HOSTDATA
1261                     && target == MACH64_BM_ADDR)
1262                         no_idle_wait = 1;
1263
1264                 target = new_target;
1265
1266                 found = 0;
1267                 offset = 0;
1268                 list_for_each(ptr, &dev_priv->pending) {
1269                         entry = list_entry(ptr, drm_mach64_freelist_t, list);
1270                         buf = entry->buf;
1271                         offset = buf_addr - GETBUFADDR(buf);
1272                         if (offset >= 0 && offset < MACH64_BUFFER_SIZE) {
1273                                 found = 1;
1274                                 break;
1275                         }
1276                 }
1277
1278                 if (!found || buf == NULL) {
1279                         DRM_ERROR
1280                             ("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n",
1281                              head, ring->tail, buf_addr, (eol ? "eol" : ""));
1282                         mach64_dump_ring_info(dev_priv);
1283                         mach64_do_engine_reset(dev_priv);
1284                         return -EINVAL;
1285                 }
1286
1287                 /* Hand feed the buffer to the card via MMIO, waiting for the fifo
1288                  * every 16 writes
1289                  */
1290                 DRM_DEBUG("target: (0x%08x) %s\n", target,
1291                           (target ==
1292                            MACH64_BM_HOSTDATA ? "BM_HOSTDATA" : "BM_ADDR"));
1293                 DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes,
1294                           buf->used);
1295
1296                 remaining = (buf->used - offset) >> 2;  /* dwords remaining in buffer */
1297                 used = bytes >> 2;      /* dwords in buffer for this descriptor */
1298                 buf_ptr = (u32 *) ((char *)GETBUFPTR(buf) + offset);
1299
1300                 while (used) {
1301
1302                         if (count == 0) {
1303                                 if (target == MACH64_BM_HOSTDATA) {
1304                                         reg = DMAREG(MACH64_HOST_DATA0);
1305                                         count =
1306                                             (remaining > 16) ? 16 : remaining;
1307                                         fifo = 0;
1308                                 } else {
1309                                         reg = le32_to_cpu(*buf_ptr++);
1310                                         used--;
1311                                         count = (reg >> 16) + 1;
1312                                 }
1313
1314                                 reg = reg & 0xffff;
1315                                 reg = MMSELECT(reg);
1316                         }
1317                         while (count && used) {
1318                                 if (!fifo) {
1319                                         if (no_idle_wait) {
1320                                                 if ((ret =
1321                                                      mach64_do_wait_for_fifo
1322                                                      (dev_priv, 16)) < 0) {
1323                                                         no_idle_wait = 0;
1324                                                         return ret;
1325                                                 }
1326                                         } else {
1327                                                 if ((ret =
1328                                                      mach64_do_wait_for_idle
1329                                                      (dev_priv)) < 0) {
1330                                                         return ret;
1331                                                 }
1332                                         }
1333                                         fifo = 16;
1334                                 }
1335                                 --fifo;
1336                                 MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++));
1337                                 used--;
1338                                 remaining--;
1339
1340                                 reg += 4;
1341                                 count--;
1342                         }
1343                 }
1344                 ring->head = head;
1345                 ring->head_addr = ring->start_addr + (ring->head * sizeof(u32));
1346                 ring->space += (4 * sizeof(u32));
1347         }
1348
1349         if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
1350                 return ret;
1351         }
1352         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
1353                      ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
1354
1355         DRM_DEBUG("completed\n");
1356         return 0;
1357 }
1358
1359 /*@}*/
1360
1361
1362 /*******************************************************************/
1363 /** \name DMA cleanup */
1364 /*@{*/
1365
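/**
 * Tear down the DMA engine state.
 *
 * Disables interrupts, unmaps the descriptor ring and DMA buffers on AGP
 * cards, destroys the buffer freelist, and frees the driver-private data.
 *
 * \param dev DRM device.
 */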
1366 int mach64_do_cleanup_dma(struct drm_device * dev)
1367 {
1368         DRM_DEBUG("\n");
1369
1370         /* Make sure interrupts are disabled here, because userspace may not
1371          * have called the irq uninstall ioctl; once dev_private is freed,
1372          * it's too late.
1373          */
1374         if (dev->irq)
1375                 drm_irq_uninstall(dev);
1376
1377         if (dev->dev_private) {
1378                 drm_mach64_private_t *dev_priv = dev->dev_private;
1379
1380                 if (!dev_priv->is_pci) {
1381                         if (dev_priv->ring_map)
1382                                 drm_core_ioremapfree(dev_priv->ring_map, dev);
1383
1384                         if (dev->agp_buffer_map) {
1385                                 drm_core_ioremapfree(dev->agp_buffer_map, dev);
1386                                 dev->agp_buffer_map = NULL;
1387                         }
1388                 }
1389
1390                 mach64_destroy_freelist(dev);
1391
1392                 drm_free(dev_priv, sizeof(drm_mach64_private_t),
1393                          DRM_MEM_DRIVER);
1394                 dev->dev_private = NULL;
1395         }
1396
1397         return 0;
1398 }
1399
1400 /*@}*/
1401
1402
1403 /*******************************************************************/
1404 /** \name IOCTL handlers */
1405 /*@{*/
1406
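/**
 * DMA initialization ioctl handler.
 *
 * Dispatches DRM_MACH64_INIT_DMA and DRM_MACH64_CLEANUP_DMA requests to
 * mach64_do_dma_init() and mach64_do_cleanup_dma() respectively.
 *
 * \returns the result of the requested operation, or -EINVAL for an unknown
 * init function.
 */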
1407 int mach64_dma_init(struct drm_device *dev, void *data,
1408                     struct drm_file *file_priv)
1409 {
1410         drm_mach64_init_t *init = data;
1411
1412         DRM_DEBUG("\n");
1413
1414         LOCK_TEST_WITH_RETURN(dev, file_priv);
1415
1416         switch (init->func) {
1417         case DRM_MACH64_INIT_DMA:
1418                 return mach64_do_dma_init(dev, init);
1419         case DRM_MACH64_CLEANUP_DMA:
1420                 return mach64_do_cleanup_dma(dev);
1421         }
1422
1423         return -EINVAL;
1424 }
1425
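/**
 * Ioctl handler: wait for the DMA engine to become idle via
 * mach64_do_dma_idle().
 */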
1426 int mach64_dma_idle(struct drm_device *dev, void *data,
1427                     struct drm_file *file_priv)
1428 {
1429         drm_mach64_private_t *dev_priv = dev->dev_private;
1430
1431         DRM_DEBUG("\n");
1432
1433         LOCK_TEST_WITH_RETURN(dev, file_priv);
1434
1435         return mach64_do_dma_idle(dev_priv);
1436 }
1437
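/**
 * Ioctl handler: flush the DMA queue via mach64_do_dma_flush().
 */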
1438 int mach64_dma_flush(struct drm_device *dev, void *data,
1439                      struct drm_file *file_priv)
1440 {
1441         drm_mach64_private_t *dev_priv = dev->dev_private;
1442
1443         DRM_DEBUG("\n");
1444
1445         LOCK_TEST_WITH_RETURN(dev, file_priv);
1446
1447         return mach64_do_dma_flush(dev_priv);
1448 }
1449
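/**
 * Ioctl handler: reset the drawing engine via mach64_do_engine_reset().
 */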
1450 int mach64_engine_reset(struct drm_device *dev, void *data,
1451                         struct drm_file *file_priv)
1452 {
1453         drm_mach64_private_t *dev_priv = dev->dev_private;
1454
1455         DRM_DEBUG("\n");
1456
1457         LOCK_TEST_WITH_RETURN(dev, file_priv);
1458
1459         return mach64_do_engine_reset(dev_priv);
1460 }
1461
1462 /*@}*/
1463
1464
1465 /*******************************************************************/
1466 /** \name Freelist management */
1467 /*@{*/
1468
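/**
 * Build the buffer freelist.
 *
 * Allocates a freelist entry for every buffer in the DMA buffer list and adds
 * it to the tail of the free list.
 *
 * \returns zero on success, or -ENOMEM if an entry could not be allocated.
 */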
1469 int mach64_init_freelist(struct drm_device * dev)
1470 {
1471         struct drm_device_dma *dma = dev->dma;
1472         drm_mach64_private_t *dev_priv = dev->dev_private;
1473         drm_mach64_freelist_t *entry;
1474         struct list_head *ptr;
1475         int i;
1476
1477         DRM_DEBUG("adding %d buffers to freelist\n", dma->buf_count);
1478
1479         for (i = 0; i < dma->buf_count; i++) {
1480                 if ((entry =
1481                      (drm_mach64_freelist_t *)
1482                      drm_alloc(sizeof(drm_mach64_freelist_t),
1483                                DRM_MEM_BUFLISTS)) == NULL)
1484                         return -ENOMEM;
1485                 memset(entry, 0, sizeof(drm_mach64_freelist_t));
1486                 entry->buf = dma->buflist[i];
1487                 ptr = &entry->list;
1488                 list_add_tail(ptr, &dev_priv->free_list);
1489         }
1490
1491         return 0;
1492 }
1493
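/**
 * Free all freelist entries on the pending, placeholder and free lists.
 */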
1494 void mach64_destroy_freelist(struct drm_device * dev)
1495 {
1496         drm_mach64_private_t *dev_priv = dev->dev_private;
1497         drm_mach64_freelist_t *entry;
1498         struct list_head *ptr;
1499         struct list_head *tmp;
1500
1501         DRM_DEBUG("\n");
1502
1503         list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1504                 list_del(ptr);
1505                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1506                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1507         }
1508         list_for_each_safe(ptr, tmp, &dev_priv->placeholders) {
1509                 list_del(ptr);
1510                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1511                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1512         }
1513
1514         list_for_each_safe(ptr, tmp, &dev_priv->free_list) {
1515                 list_del(ptr);
1516                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1517                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1518         }
1519 }
1520
1521 /* IMPORTANT: This function should only be called when the engine is idle or locked up,
1522  * as it assumes all buffers in the pending list have been completed by the hardware.
1523  */
1524 int mach64_do_release_used_buffers(drm_mach64_private_t *dev_priv)
1525 {
1526         struct list_head *ptr;
1527         struct list_head *tmp;
1528         drm_mach64_freelist_t *entry;
1529         int i;
1530
1531         if (list_empty(&dev_priv->pending))
1532                 return 0;
1533
1534         /* Iterate the pending list and move all discarded buffers to the freelist... */
1535         i = 0;
1536         list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1537                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1538                 if (entry->discard) {
1539                         entry->buf->pending = 0;
1540                         list_del(ptr);
1541                         list_add_tail(ptr, &dev_priv->free_list);
1542                         i++;
1543                 }
1544         }
1545
1546         DRM_DEBUG("released %d buffers from pending list\n", i);
1547
1548         return 0;
1549 }
1550
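/**
 * Try to make a free buffer available by reclaiming completed descriptors.
 *
 * Updates the ring head with mach64_ring_tick() and, if the ring has drained,
 * releases all discarded pending buffers.  Otherwise the pending list is
 * scanned for a discarded buffer whose descriptor the engine has already
 * passed.
 *
 * \returns zero if a free buffer is available, 1 if nothing could be
 * reclaimed yet, or -1 on an inconsistency.
 */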
1551 static int mach64_do_reclaim_completed(drm_mach64_private_t *dev_priv)
1552 {
1553         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
1554         struct list_head *ptr;
1555         struct list_head *tmp;
1556         drm_mach64_freelist_t *entry;
1557         u32 head, tail, ofs;
1558
1559         mach64_ring_tick(dev_priv, ring);
1560         head = ring->head;
1561         tail = ring->tail;
1562
1563         if (head == tail) {
1564 #if MACH64_EXTRA_CHECKING
1565                 if (MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE) {
1566                         DRM_ERROR("Empty ring with non-idle engine!\n");
1567                         mach64_dump_ring_info(dev_priv);
1568                         return -1;
1569                 }
1570 #endif
1571                 /* last pass is complete, so release everything */
1572                 mach64_do_release_used_buffers(dev_priv);
1573                 DRM_DEBUG("idle engine, freed all buffers.\n");
1574                 if (list_empty(&dev_priv->free_list)) {
1575                         DRM_ERROR("Freelist empty with idle engine\n");
1576                         return -1;
1577                 }
1578                 return 0;
1579         }
1580         /* Look for a completed buffer and bail out of the loop
1581          * as soon as we find one -- don't waste time trying
1582          * to free extra bufs here, leave that to do_release_used_buffers
1583          */
1584         list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1585                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1586                 ofs = entry->ring_ofs;
1587                 if (entry->discard &&
1588                     ((head < tail && (ofs < head || ofs >= tail)) ||
1589                      (head > tail && (ofs < head && ofs >= tail)))) {
1590 #if MACH64_EXTRA_CHECKING
1591                         int i;
1592
1593                         for (i = head; i != tail; i = (i + 4) & ring->tail_mask)
1594                         {
1595                                 u32 o1 = le32_to_cpu(((u32 *) ring->start)[i + 1]);
1597                                 u32 o2 = GETBUFADDR(entry->buf);
1598
1599                                 if (o1 == o2) {
1600                                         DRM_ERROR
1601                                             ("Attempting to free used buffer: "
1602                                              "i=%d  buf=0x%08x\n",
1603                                              i, o1);
1604                                         mach64_dump_ring_info(dev_priv);
1605                                         return -1;
1606                                 }
1607                         }
1608 #endif
1609                         /* found a processed buffer */
1610                         entry->buf->pending = 0;
1611                         list_del(ptr);
1612                         list_add_tail(ptr, &dev_priv->free_list);
1613                         DRM_DEBUG
1614                             ("freed processed buffer (head=%d tail=%d "
1615                              "buf ring ofs=%d).\n",
1616                              head, tail, ofs);
1617                         return 0;
1618                 }
1619         }
1620
1621         return 1;
1622 }
1623
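/**
 * Get a free DMA buffer for a new request.
 *
 * If the freelist is empty, polls mach64_do_reclaim_completed() for up to
 * dev_priv->usec_timeout iterations to recover a completed buffer.  The
 * buffer's freelist entry is moved to the placeholder list.
 *
 * \returns the buffer, or NULL on timeout or error.
 */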
1624 struct drm_buf *mach64_freelist_get(drm_mach64_private_t *dev_priv)
1625 {
1626         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
1627         drm_mach64_freelist_t *entry;
1628         struct list_head *ptr;
1629         int t;
1630
1631         if (list_empty(&dev_priv->free_list)) {
1632                 if (list_empty(&dev_priv->pending)) {
1633                         DRM_ERROR
1634                             ("Couldn't get buffer - pending and free lists empty\n");
1635                         t = 0;
1636                         list_for_each(ptr, &dev_priv->placeholders) {
1637                                 t++;
1638                         }
1639                         DRM_INFO("Placeholders: %d\n", t);
1640                         return NULL;
1641                 }
1642
1643                 for (t = 0; t < dev_priv->usec_timeout; t++) {
1644                         int ret;
1645
1646                         ret = mach64_do_reclaim_completed(dev_priv);
1647                         if (ret == 0)
1648                                 goto _freelist_entry_found;
1649                         if (ret < 0)
1650                                 return NULL;
1651
1652                         DRM_UDELAY(1);
1653                 }
1654                 mach64_dump_ring_info(dev_priv);
1655                 DRM_ERROR
1656                     ("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n",
1657                      ring->head_addr, ring->head, ring->tail);
1658                 return NULL;
1659         }
1660
1661       _freelist_entry_found:
1662         ptr = dev_priv->free_list.next;
1663         list_del(ptr);
1664         entry = list_entry(ptr, drm_mach64_freelist_t, list);
1665         entry->buf->used = 0;
1666         list_add_tail(ptr, &dev_priv->placeholders);
1667         return entry->buf;
1668 }
1669
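/**
 * Return a buffer to the freelist.
 *
 * Takes an entry from the placeholder list, points it at \p copy_buf and
 * marks it for discard before adding it back to the free list.
 *
 * \returns zero on success, or -EFAULT if the buffer is still on the pending
 * list (MACH64_EXTRA_CHECKING only).
 */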
1670 int mach64_freelist_put(drm_mach64_private_t *dev_priv, struct drm_buf *copy_buf)
1671 {
1672         struct list_head *ptr;
1673         drm_mach64_freelist_t *entry;
1674
1675 #if MACH64_EXTRA_CHECKING
1676         list_for_each(ptr, &dev_priv->pending) {
1677                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1678                 if (copy_buf == entry->buf) {
1679                         DRM_ERROR("Trying to release a pending buf\n");
1680                         return -EFAULT;
1681                 }
1682         }
1683 #endif
1684         ptr = dev_priv->placeholders.next;
1685         entry = list_entry(ptr, drm_mach64_freelist_t, list);
1686         copy_buf->pending = 0;
1687         copy_buf->used = 0;
1688         entry->buf = copy_buf;
1689         entry->discard = 1;
1690         list_del(ptr);
1691         list_add_tail(ptr, &dev_priv->free_list);
1692
1693         return 0;
1694 }
1695
1696 /*@}*/
1697
1698
1699 /*******************************************************************/
1700 /** \name DMA buffer request and submission IOCTL handler */
1701 /*@{*/
1702
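/**
 * Grant DMA buffers to the client.
 *
 * Pulls buffers from the freelist and copies their indices and sizes to the
 * user-space request until d->request_count buffers have been granted.
 */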
1703 static int mach64_dma_get_buffers(struct drm_device *dev,
1704                                   struct drm_file *file_priv,
1705                                   struct drm_dma * d)
1706 {
1707         int i;
1708         struct drm_buf *buf;
1709         drm_mach64_private_t *dev_priv = dev->dev_private;
1710
1711         for (i = d->granted_count; i < d->request_count; i++) {
1712                 buf = mach64_freelist_get(dev_priv);
1713 #if MACH64_EXTRA_CHECKING
1714                 if (!buf)
1715                         return -EFAULT;
1716 #else
1717                 if (!buf)
1718                         return -EAGAIN;
1719 #endif
1720
1721                 buf->file_priv = file_priv;
1722
1723                 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
1724                                      sizeof(buf->idx)))
1725                         return -EFAULT;
1726                 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
1727                                      sizeof(buf->total)))
1728                         return -EFAULT;
1729
1730                 d->granted_count++;
1731         }
1732         return 0;
1733 }
1734
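/**
 * DMA buffer request ioctl handler.
 *
 * Sending buffers through this interface is not supported; the requested
 * count is validated and the buffers are granted with
 * mach64_dma_get_buffers().
 */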
1735 int mach64_dma_buffers(struct drm_device *dev, void *data,
1736                        struct drm_file *file_priv)
1737 {
1738         struct drm_device_dma *dma = dev->dma;
1739         struct drm_dma *d = data;
1740         int ret = 0;
1741
1742         LOCK_TEST_WITH_RETURN(dev, file_priv);
1743
1744         /* Please don't send us buffers.
1745          */
1746         if (d->send_count != 0) {
1747                 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1748                           DRM_CURRENTPID, d->send_count);
1749                 return -EINVAL;
1750         }
1751
1752         /* We'll send you buffers.
1753          */
1754         if (d->request_count < 0 || d->request_count > dma->buf_count) {
1755                 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1756                           DRM_CURRENTPID, d->request_count, dma->buf_count);
1757                 return -EINVAL;
1758         }
1759
1760         d->granted_count = 0;
1761
1762         if (d->request_count) {
1763                 ret = mach64_dma_get_buffers(dev, file_priv, d);
1764         }
1765
1766         return ret;
1767 }
1768
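/**
 * Driver lastclose callback: tears down the DMA state.
 */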
1769 void mach64_driver_lastclose(struct drm_device * dev)
1770 {
1771         mach64_do_cleanup_dma(dev);
1772 }
1773
1774 /*@}*/