1 /* mach64_dma.c -- DMA support for mach64 (Rage Pro) driver -*- linux-c -*- */
2 /**
3  * \file mach64_dma.c
4  * DMA support for mach64 (Rage Pro) driver
5  *
6  * \author Gareth Hughes <gareth@valinux.com>
7  * \author Frank C. Earl <fearl@airmail.net>
8  * \author Leif Delgass <ldelgass@retinalburn.net>
9  * \author José Fonseca <j_r_fonseca@yahoo.co.uk>
10  */
11
12 /*-
13  * Copyright 2000 Gareth Hughes
14  * Copyright 2002 Frank C. Earl
15  * Copyright 2002-2003 Leif Delgass
16  * All Rights Reserved.
17  *
18  * Permission is hereby granted, free of charge, to any person obtaining a
19  * copy of this software and associated documentation files (the "Software"),
20  * to deal in the Software without restriction, including without limitation
21  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
22  * and/or sell copies of the Software, and to permit persons to whom the
23  * Software is furnished to do so, subject to the following conditions:
24  *
25  * The above copyright notice and this permission notice (including the next
26  * paragraph) shall be included in all copies or substantial portions of the
27  * Software.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
32  * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
33  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
35  */
36
37 #include "dev/drm/drmP.h"
38 #include "dev/drm/drm.h"
39 #include "dev/drm/mach64_drm.h"
40 #include "dev/drm/mach64_drv.h"
41
42 /*******************************************************************/
43 /** \name Engine, FIFO control */
44 /*@{*/
45
46 /**
47  * Wait for free entries in the FIFO.
48  *
49  * \note Most writes to Mach64 registers are automatically routed through the
50  * command FIFO, which is 16 entries deep. Prior to writing to any draw engine
51  * register, one has to ensure that enough FIFO entries are available by calling
52  * this function.  Failure to do so may cause the engine to lock up.
53  *
54  * \param dev_priv pointer to device private data structure.
55  * \param entries number of free entries in the FIFO to wait for.
56  *
57  * \returns zero on success, or -EBUSY if the timeout (specified by
58  * drm_mach64_private::usec_timeout) occurs.
59  */
60 int mach64_do_wait_for_fifo(drm_mach64_private_t *dev_priv, int entries)
61 {
62         int slots = 0, i;
63
64         for (i = 0; i < dev_priv->usec_timeout; i++) {
65                 slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK);
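                /* FIFO_STAT appears to report occupancy as a bit pattern, so a
                 * value at or below (0x8000 >> entries) means at least
                 * 'entries' slots are free; for entries == 16 (a full drain,
                 * as requested by mach64_do_wait_for_idle()) the threshold is
                 * zero.
                 */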
66                 if (slots <= (0x8000 >> entries))
67                         return 0;
68                 DRM_UDELAY(1);
69         }
70
71         DRM_INFO("failed! slots=%d entries=%d\n", slots, entries);
72         return -EBUSY;
73 }
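/*
 * Illustrative use of mach64_do_wait_for_fifo(), mirroring
 * mach64_bm_dma_test() below: reserve FIFO slots before programming
 * draw-engine registers, e.g.
 *
 *      mach64_do_wait_for_fifo(dev_priv, 2);
 *      MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
 *      MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
 */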
74
75 /**
76  * Wait for the draw engine to be idle.
77  */
78 int mach64_do_wait_for_idle(drm_mach64_private_t *dev_priv)
79 {
80         int i, ret;
81
82         ret = mach64_do_wait_for_fifo(dev_priv, 16);
83         if (ret < 0)
84                 return ret;
85
86         for (i = 0; i < dev_priv->usec_timeout; i++) {
87                 if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE))
88                         return 0;
89                 DRM_UDELAY(1);
90         }
91
92         DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
93         mach64_dump_ring_info(dev_priv);
94         return -EBUSY;
95 }
96
97 /**
98  * Wait for free entries in the ring buffer.
99  *
100  * The Mach64 bus master can be configured to act as a virtual FIFO, using a
101  * circular buffer (commonly referred to as a "ring buffer" in other drivers) with
102  * pointers to engine commands. This allows the CPU to do other things while
103  * the graphics engine is busy, i.e., DMA mode.
104  *
105  * This function should be called before writing new entries to the ring
106  * buffer.
107  *
108  * \param dev_priv pointer to device private data structure.
109  * \param n number of free entries in the ring buffer to wait for.
110  *
111  * \returns zero on success, or -EBUSY if the timeout (specified by
112  * drm_mach64_private_t::usec_timeout) occurs.
113  *
114  * \sa mach64_dump_ring_info()
115  */
116 int mach64_wait_ring(drm_mach64_private_t *dev_priv, int n)
117 {
118         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
119         int i;
120
121         for (i = 0; i < dev_priv->usec_timeout; i++) {
122                 mach64_update_ring_snapshot(dev_priv);
123                 if (ring->space >= n) {
124                         if (i > 0)
125                                 DRM_DEBUG("%d usecs\n", i);
126                         return 0;
127                 }
128                 DRM_UDELAY(1);
129         }
130
131         /* FIXME: This is being ignored... */
132         DRM_ERROR("failed!\n");
133         mach64_dump_ring_info(dev_priv);
134         return -EBUSY;
135 }
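/*
 * Callers normally reach mach64_wait_ring() through the BEGIN_RING() macro
 * below, which reserves the requested space and resets the engine if the
 * wait times out.
 */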
136
137 /**
138  * Wait until all DMA requests have been processed...
139  *
140  * \sa mach64_wait_ring()
141  */
142 static int mach64_ring_idle(drm_mach64_private_t *dev_priv)
143 {
144         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
145         u32 head;
146         int i;
147
148         head = ring->head;
149         i = 0;
150         while (i < dev_priv->usec_timeout) {
151                 mach64_update_ring_snapshot(dev_priv);
152                 if (ring->head == ring->tail &&
153                     !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {
154                         if (i > 0)
155                                 DRM_DEBUG("%d usecs\n", i);
156                         return 0;
157                 }
158                 if (ring->head == head) {
159                         ++i;
160                 } else {
161                         head = ring->head;
162                         i = 0;
163                 }
164                 DRM_UDELAY(1);
165         }
166
167         DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
168         mach64_dump_ring_info(dev_priv);
169         return -EBUSY;
170 }
171
172 /**
173  * Reset the ring buffer descriptors.
174  *
175  * \sa mach64_do_engine_reset()
176  */
177 static void mach64_ring_reset(drm_mach64_private_t *dev_priv)
178 {
179         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
180
181         mach64_do_release_used_buffers(dev_priv);
182         ring->head_addr = ring->start_addr;
183         ring->head = ring->tail = 0;
184         ring->space = ring->size;
185
186         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
187                      ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
188
189         dev_priv->ring_running = 0;
190 }
191
192 /**
193  * Ensure that all the queued commands will be processed.
194  */
195 int mach64_do_dma_flush(drm_mach64_private_t *dev_priv)
196 {
197         /* FIXME: It's not necessary to wait for idle when flushing;
198          * we just need to ensure the ring will be completely processed
199          * in finite time without another ioctl.
200          */
201         return mach64_ring_idle(dev_priv);
202 }
203
204 /**
205  * Stop all DMA activity.
206  */
207 int mach64_do_dma_idle(drm_mach64_private_t *dev_priv)
208 {
209         int ret;
210
211         /* wait for completion */
212         if ((ret = mach64_ring_idle(dev_priv)) < 0) {
213                 DRM_ERROR("failed BM_GUI_TABLE=0x%08x tail: %u\n",
214                           MACH64_READ(MACH64_BM_GUI_TABLE),
215                           dev_priv->ring.tail);
216                 return ret;
217         }
218
219         mach64_ring_stop(dev_priv);
220
221         /* clean up after pass */
222         mach64_do_release_used_buffers(dev_priv);
223         return 0;
224 }
225
226 /**
227  * Reset the engine.  This will stop the DMA if it is running.
228  */
229 int mach64_do_engine_reset(drm_mach64_private_t *dev_priv)
230 {
231         u32 tmp;
232
233         DRM_DEBUG("\n");
234
235         /* Kill off any outstanding DMA transfers.
236          */
237         tmp = MACH64_READ(MACH64_BUS_CNTL);
238         MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS);
239
240         /* Reset the GUI engine (high to low transition).
241          */
242         tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
243         MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE);
244         /* Enable the GUI engine
245          */
246         tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
247         MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE);
248
249         /* ensure engine is not locked up by clearing any FIFO or HOST errors
250          */
251         tmp = MACH64_READ(MACH64_BUS_CNTL);
252         MACH64_WRITE(MACH64_BUS_CNTL, tmp | 0x00a00000);
253
254         /* Once the GUI engine is restored, disable bus mastering */
255         MACH64_WRITE(MACH64_SRC_CNTL, 0);
256
257         /* Reset descriptor ring */
258         mach64_ring_reset(dev_priv);
259
260         return 0;
261 }
262
263 /*@}*/
264
265
266 /*******************************************************************/
267 /** \name Debugging output */
268 /*@{*/
269
270 /**
271  * Dump engine register values.
272  */
273 void mach64_dump_engine_info(drm_mach64_private_t *dev_priv)
274 {
275         DRM_INFO("\n");
276         if (!dev_priv->is_pci) {
277                 DRM_INFO("           AGP_BASE = 0x%08x\n",
278                          MACH64_READ(MACH64_AGP_BASE));
279                 DRM_INFO("           AGP_CNTL = 0x%08x\n",
280                          MACH64_READ(MACH64_AGP_CNTL));
281         }
282         DRM_INFO("     ALPHA_TST_CNTL = 0x%08x\n",
283                  MACH64_READ(MACH64_ALPHA_TST_CNTL));
284         DRM_INFO("\n");
285         DRM_INFO("         BM_COMMAND = 0x%08x\n",
286                  MACH64_READ(MACH64_BM_COMMAND));
287         DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
288                  MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
289         DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
290                  MACH64_READ(MACH64_BM_GUI_TABLE));
291         DRM_INFO("          BM_STATUS = 0x%08x\n",
292                  MACH64_READ(MACH64_BM_STATUS));
293         DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
294                  MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
295         DRM_INFO("    BM_SYSTEM_TABLE = 0x%08x\n",
296                  MACH64_READ(MACH64_BM_SYSTEM_TABLE));
297         DRM_INFO("           BUS_CNTL = 0x%08x\n",
298                  MACH64_READ(MACH64_BUS_CNTL));
299         DRM_INFO("\n");
300         /* DRM_INFO( "         CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */
301         DRM_INFO("        CLR_CMP_CLR = 0x%08x\n",
302                  MACH64_READ(MACH64_CLR_CMP_CLR));
303         DRM_INFO("       CLR_CMP_CNTL = 0x%08x\n",
304                  MACH64_READ(MACH64_CLR_CMP_CNTL));
305         /* DRM_INFO( "        CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */
306         DRM_INFO("     CONFIG_CHIP_ID = 0x%08x\n",
307                  MACH64_READ(MACH64_CONFIG_CHIP_ID));
308         DRM_INFO("        CONFIG_CNTL = 0x%08x\n",
309                  MACH64_READ(MACH64_CONFIG_CNTL));
310         DRM_INFO("       CONFIG_STAT0 = 0x%08x\n",
311                  MACH64_READ(MACH64_CONFIG_STAT0));
312         DRM_INFO("       CONFIG_STAT1 = 0x%08x\n",
313                  MACH64_READ(MACH64_CONFIG_STAT1));
314         DRM_INFO("       CONFIG_STAT2 = 0x%08x\n",
315                  MACH64_READ(MACH64_CONFIG_STAT2));
316         DRM_INFO("            CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG));
317         DRM_INFO("  CUSTOM_MACRO_CNTL = 0x%08x\n",
318                  MACH64_READ(MACH64_CUSTOM_MACRO_CNTL));
319         DRM_INFO("\n");
320         /* DRM_INFO( "           DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */
321         /* DRM_INFO( "           DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */
322         DRM_INFO("        DP_BKGD_CLR = 0x%08x\n",
323                  MACH64_READ(MACH64_DP_BKGD_CLR));
324         DRM_INFO("        DP_FRGD_CLR = 0x%08x\n",
325                  MACH64_READ(MACH64_DP_FRGD_CLR));
326         DRM_INFO("             DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX));
327         DRM_INFO("       DP_PIX_WIDTH = 0x%08x\n",
328                  MACH64_READ(MACH64_DP_PIX_WIDTH));
329         DRM_INFO("             DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC));
330         DRM_INFO("      DP_WRITE_MASK = 0x%08x\n",
331                  MACH64_READ(MACH64_DP_WRITE_MASK));
332         DRM_INFO("         DSP_CONFIG = 0x%08x\n",
333                  MACH64_READ(MACH64_DSP_CONFIG));
334         DRM_INFO("         DSP_ON_OFF = 0x%08x\n",
335                  MACH64_READ(MACH64_DSP_ON_OFF));
336         DRM_INFO("           DST_CNTL = 0x%08x\n",
337                  MACH64_READ(MACH64_DST_CNTL));
338         DRM_INFO("      DST_OFF_PITCH = 0x%08x\n",
339                  MACH64_READ(MACH64_DST_OFF_PITCH));
340         DRM_INFO("\n");
341         /* DRM_INFO( "       EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */
342         DRM_INFO("       EXT_MEM_CNTL = 0x%08x\n",
343                  MACH64_READ(MACH64_EXT_MEM_CNTL));
344         DRM_INFO("\n");
345         DRM_INFO("          FIFO_STAT = 0x%08x\n",
346                  MACH64_READ(MACH64_FIFO_STAT));
347         DRM_INFO("\n");
348         DRM_INFO("      GEN_TEST_CNTL = 0x%08x\n",
349                  MACH64_READ(MACH64_GEN_TEST_CNTL));
350         /* DRM_INFO( "              GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */
351         DRM_INFO("   GUI_CMDFIFO_DATA = 0x%08x\n",
352                  MACH64_READ(MACH64_GUI_CMDFIFO_DATA));
353         DRM_INFO("  GUI_CMDFIFO_DEBUG = 0x%08x\n",
354                  MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG));
355         DRM_INFO("           GUI_CNTL = 0x%08x\n",
356                  MACH64_READ(MACH64_GUI_CNTL));
357         DRM_INFO("           GUI_STAT = 0x%08x\n",
358                  MACH64_READ(MACH64_GUI_STAT));
359         DRM_INFO("      GUI_TRAJ_CNTL = 0x%08x\n",
360                  MACH64_READ(MACH64_GUI_TRAJ_CNTL));
361         DRM_INFO("\n");
362         DRM_INFO("          HOST_CNTL = 0x%08x\n",
363                  MACH64_READ(MACH64_HOST_CNTL));
364         DRM_INFO("           HW_DEBUG = 0x%08x\n",
365                  MACH64_READ(MACH64_HW_DEBUG));
366         DRM_INFO("\n");
367         DRM_INFO("    MEM_ADDR_CONFIG = 0x%08x\n",
368                  MACH64_READ(MACH64_MEM_ADDR_CONFIG));
369         DRM_INFO("       MEM_BUF_CNTL = 0x%08x\n",
370                  MACH64_READ(MACH64_MEM_BUF_CNTL));
371         DRM_INFO("\n");
372         DRM_INFO("           PAT_REG0 = 0x%08x\n",
373                  MACH64_READ(MACH64_PAT_REG0));
374         DRM_INFO("           PAT_REG1 = 0x%08x\n",
375                  MACH64_READ(MACH64_PAT_REG1));
376         DRM_INFO("\n");
377         DRM_INFO("            SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT));
378         DRM_INFO("           SC_RIGHT = 0x%08x\n",
379                  MACH64_READ(MACH64_SC_RIGHT));
380         DRM_INFO("             SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP));
381         DRM_INFO("          SC_BOTTOM = 0x%08x\n",
382                  MACH64_READ(MACH64_SC_BOTTOM));
383         DRM_INFO("\n");
384         DRM_INFO("      SCALE_3D_CNTL = 0x%08x\n",
385                  MACH64_READ(MACH64_SCALE_3D_CNTL));
386         DRM_INFO("       SCRATCH_REG0 = 0x%08x\n",
387                  MACH64_READ(MACH64_SCRATCH_REG0));
388         DRM_INFO("       SCRATCH_REG1 = 0x%08x\n",
389                  MACH64_READ(MACH64_SCRATCH_REG1));
390         DRM_INFO("         SETUP_CNTL = 0x%08x\n",
391                  MACH64_READ(MACH64_SETUP_CNTL));
392         DRM_INFO("           SRC_CNTL = 0x%08x\n",
393                  MACH64_READ(MACH64_SRC_CNTL));
394         DRM_INFO("\n");
395         DRM_INFO("           TEX_CNTL = 0x%08x\n",
396                  MACH64_READ(MACH64_TEX_CNTL));
397         DRM_INFO("     TEX_SIZE_PITCH = 0x%08x\n",
398                  MACH64_READ(MACH64_TEX_SIZE_PITCH));
399         DRM_INFO("       TIMER_CONFIG = 0x%08x\n",
400                  MACH64_READ(MACH64_TIMER_CONFIG));
401         DRM_INFO("\n");
402         DRM_INFO("             Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL));
403         DRM_INFO("        Z_OFF_PITCH = 0x%08x\n",
404                  MACH64_READ(MACH64_Z_OFF_PITCH));
405         DRM_INFO("\n");
406 }
407
408 #define MACH64_DUMP_CONTEXT     3
409
410 /**
411  * Used by mach64_dump_ring_info() to dump the contents of the current buffer
412  * pointed to by the ring head.
413  */
414 static void mach64_dump_buf_info(drm_mach64_private_t *dev_priv,
415                                  struct drm_buf *buf)
416 {
417         u32 addr = GETBUFADDR(buf);
418         u32 used = buf->used >> 2;
419         u32 sys_addr = MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR);
420         u32 *p = GETBUFPTR(buf);
421         int skipped = 0;
422
423         DRM_INFO("buffer contents:\n");
424
425         while (used) {
426                 u32 reg, count;
427
428                 reg = le32_to_cpu(*p++);
429                 if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
430                     (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
431                      addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
432                     addr >=
433                     GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) {
434                         DRM_INFO("%08x:  0x%08x\n", addr, reg);
435                 }
436                 addr += 4;
437                 used--;
438
439                 count = (reg >> 16) + 1;
440                 reg = reg & 0xffff;
441                 reg = MMSELECT(reg);
442                 while (count && used) {
443                         if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
444                             (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
445                              addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
446                             addr >=
447                             GETBUFADDR(buf) + buf->used -
448                             MACH64_DUMP_CONTEXT * 4) {
449                                 DRM_INFO("%08x:    0x%04x = 0x%08x\n", addr,
450                                          reg, le32_to_cpu(*p));
451                                 skipped = 0;
452                         } else {
453                                 if (!skipped) {
454                                         DRM_INFO("  ...\n");
455                                         skipped = 1;
456                                 }
457                         }
458                         p++;
459                         addr += 4;
460                         used--;
461
462                         reg += 4;
463                         count--;
464                 }
465         }
466
467         DRM_INFO("\n");
468 }
469
470 /**
471  * Dump the ring state and contents, including the contents of the buffer being
472  * processed by the graphics engine.
473  */
474 void mach64_dump_ring_info(drm_mach64_private_t *dev_priv)
475 {
476         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
477         int i, skipped;
478
479         DRM_INFO("\n");
480
481         DRM_INFO("ring contents:\n");
482         DRM_INFO("  head_addr: 0x%08x head: %u tail: %u\n\n",
483                  ring->head_addr, ring->head, ring->tail);
484
485         skipped = 0;
486         for (i = 0; i < ring->size / sizeof(u32); i += 4) {
487                 if (i <= MACH64_DUMP_CONTEXT * 4 ||
488                     i >= ring->size / sizeof(u32) - MACH64_DUMP_CONTEXT * 4 ||
489                     (i >= ring->tail - MACH64_DUMP_CONTEXT * 4 &&
490                      i <= ring->tail + MACH64_DUMP_CONTEXT * 4) ||
491                     (i >= ring->head - MACH64_DUMP_CONTEXT * 4 &&
492                      i <= ring->head + MACH64_DUMP_CONTEXT * 4)) {
493                         DRM_INFO("  0x%08x:  0x%08x 0x%08x 0x%08x 0x%08x%s%s\n",
494                                  (u32)(ring->start_addr + i * sizeof(u32)),
495                                  le32_to_cpu(((u32 *) ring->start)[i + 0]),
496                                  le32_to_cpu(((u32 *) ring->start)[i + 1]),
497                                  le32_to_cpu(((u32 *) ring->start)[i + 2]),
498                                  le32_to_cpu(((u32 *) ring->start)[i + 3]),
499                                  i == ring->head ? " (head)" : "",
500                                  i == ring->tail ? " (tail)" : "");
501                         skipped = 0;
502                 } else {
503                         if (!skipped) {
504                                 DRM_INFO("  ...\n");
505                                 skipped = 1;
506                         }
507                 }
508         }
509
510         DRM_INFO("\n");
511
512         if (ring->head >= 0 && ring->head < ring->size / sizeof(u32)) {
513                 struct list_head *ptr;
514                 u32 addr = le32_to_cpu(((u32 *) ring->start)[ring->head + 1]);
515
516                 list_for_each(ptr, &dev_priv->pending) {
517                         drm_mach64_freelist_t *entry =
518                             list_entry(ptr, drm_mach64_freelist_t, list);
519                         struct drm_buf *buf = entry->buf;
520
521                         u32 buf_addr = GETBUFADDR(buf);
522
523                         if (buf_addr <= addr && addr < buf_addr + buf->used)
524                                 mach64_dump_buf_info(dev_priv, buf);
525                 }
526         }
527
528         DRM_INFO("\n");
529         DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
530                  MACH64_READ(MACH64_BM_GUI_TABLE));
531         DRM_INFO("\n");
532         DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
533                  MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
534         DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
535                  MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
536         DRM_INFO("         BM_COMMAND = 0x%08x\n",
537                  MACH64_READ(MACH64_BM_COMMAND));
538         DRM_INFO("\n");
539         DRM_INFO("          BM_STATUS = 0x%08x\n",
540                  MACH64_READ(MACH64_BM_STATUS));
541         DRM_INFO("           BUS_CNTL = 0x%08x\n",
542                  MACH64_READ(MACH64_BUS_CNTL));
543         DRM_INFO("          FIFO_STAT = 0x%08x\n",
544                  MACH64_READ(MACH64_FIFO_STAT));
545         DRM_INFO("           GUI_STAT = 0x%08x\n",
546                  MACH64_READ(MACH64_GUI_STAT));
547         DRM_INFO("           SRC_CNTL = 0x%08x\n",
548                  MACH64_READ(MACH64_SRC_CNTL));
549 }
550
551 /*@}*/
552
553
554 /*******************************************************************/
555 /** \name DMA descriptor ring macros */
556 /*@{*/
557
558 /**
559  * Add the end mark to the ring's new tail position.
560  *
561  * The bus master engine will keep processing the DMA buffers listed in the ring
562  * until it finds this mark, at which point it stops.
563  *
564  * \sa mach64_clear_dma_eol
565  */ 
566 static __inline__ void mach64_set_dma_eol(volatile u32 *addr)
567 {
568 #if defined(__i386__)
569         int nr = 31;
570
571         /* Taken from include/asm-i386/bitops.h linux header */
572         __asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr)
573                              :"Ir"(nr));
574 #elif defined(__powerpc__)
575         u32 old;
576         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
577
578         /* Taken from the include/asm-ppc/bitops.h linux header */
579         __asm__ __volatile__("\n\
580 1:      lwarx   %0,0,%3 \n\
581         or      %0,%0,%2 \n\
582         stwcx.  %0,0,%3 \n\
583         bne-    1b":"=&r"(old), "=m"(*addr)
584                              :"r"(mask), "r"(addr), "m"(*addr)
585                              :"cc");
586 #elif defined(__alpha__)
587         u32 temp;
588         u32 mask = MACH64_DMA_EOL;
589
590         /* Taken from the include/asm-alpha/bitops.h linux header */
591         __asm__ __volatile__("1:        ldl_l %0,%3\n"
592                              "  bis %0,%2,%0\n"
593                              "  stl_c %0,%1\n"
594                              "  beq %0,2f\n"
595                              ".subsection 2\n"
596                              "2:        br 1b\n"
597                              ".previous":"=&r"(temp), "=m"(*addr)
598                              :"Ir"(mask), "m"(*addr));
599 #else
600         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
601
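        /* Generic fallback: a plain read-modify-write, which is not atomic,
         * unlike the architecture-specific versions above.
         */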
602         *addr |= mask;
603 #endif
604 }
605
606 /**
607  * Remove the end mark from the ring's old tail position.
608  *
609  * It should be called after calling mach64_set_dma_eol to mark the ring's new
610  * tail position.
611  *
612  * We update the end marks while the bus master engine is in operation. Since
613  * the bus master engine may potentially be reading from the same position
614  * that we are writing, the update must be made atomically to avoid
615  * intermediate bad data.
616  */
617 static __inline__ void mach64_clear_dma_eol(volatile u32 *addr)
618 {
619 #if defined(__i386__)
620         int nr = 31;
621
622         /* Taken from include/asm-i386/bitops.h linux header */
623         __asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
624                              :"Ir"(nr));
625 #elif defined(__powerpc__)
626         u32 old;
627         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
628
629         /* Taken from the include/asm-ppc/bitops.h linux header */
630         __asm__ __volatile__("\n\
631 1:      lwarx   %0,0,%3 \n\
632         andc    %0,%0,%2 \n\
633         stwcx.  %0,0,%3 \n\
634         bne-    1b":"=&r"(old), "=m"(*addr)
635                              :"r"(mask), "r"(addr), "m"(*addr)
636                              :"cc");
637 #elif defined(__alpha__)
638         u32 temp;
639         u32 mask = ~MACH64_DMA_EOL;
640
641         /* Taken from the include/asm-alpha/bitops.h linux header */
642         __asm__ __volatile__("1:        ldl_l %0,%3\n"
643                              "  and %0,%2,%0\n"
644                              "  stl_c %0,%1\n"
645                              "  beq %0,2f\n"
646                              ".subsection 2\n"
647                              "2:        br 1b\n"
648                              ".previous":"=&r"(temp), "=m"(*addr)
649                              :"Ir"(mask), "m"(*addr));
650 #else
651         u32 mask = cpu_to_le32(~MACH64_DMA_EOL);
652
653         *addr &= mask;
654 #endif
655 }
656
657 #define RING_LOCALS                                                     \
658         int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring
659
660 #define RING_WRITE_OFS  _ring_write
661
662 #define BEGIN_RING(n)                                                   \
663         do {                                                            \
664                 if (MACH64_VERBOSE) {                                   \
665                         DRM_INFO( "BEGIN_RING( %d ) \n",                \
666                                   (n) );                                \
667                 }                                                       \
668                 if (dev_priv->ring.space <= (n) * sizeof(u32)) {        \
669                         int ret;                                        \
670                         if ((ret = mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \
671                                 DRM_ERROR( "wait_ring failed, resetting engine\n"); \
672                                 mach64_dump_engine_info( dev_priv );    \
673                                 mach64_do_engine_reset( dev_priv );     \
674                                 return ret;                             \
675                         }                                               \
676                 }                                                       \
677                 dev_priv->ring.space -= (n) * sizeof(u32);              \
678                 _ring = (u32 *) dev_priv->ring.start;                   \
679                 _ring_tail = _ring_write = dev_priv->ring.tail;         \
680                 _ring_mask = dev_priv->ring.tail_mask;                  \
681         } while (0)
682
683 #define OUT_RING( x )                                           \
684 do {                                                            \
685         if (MACH64_VERBOSE) {                                   \
686                 DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",    \
687                            (unsigned int)(x), _ring_write );    \
688         }                                                       \
689         _ring[_ring_write++] = cpu_to_le32( x );                \
690         _ring_write &= _ring_mask;                              \
691 } while (0)
692
693 #define ADVANCE_RING()                                                  \
694 do {                                                                    \
695         if (MACH64_VERBOSE) {                                           \
696                 DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",     \
697                           _ring_write, _ring_tail );                    \
698         }                                                               \
699         DRM_MEMORYBARRIER();                                            \
700         mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] );  \
701         DRM_MEMORYBARRIER();                                            \
702         dev_priv->ring.tail = _ring_write;                              \
703         mach64_ring_tick( dev_priv, &(dev_priv)->ring );                \
704 } while (0)
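/*
 * Typical emit sequence (see mach64_add_buf_to_ring() below for a real user):
 * declare RING_LOCALS, reserve space with BEGIN_RING(), write dwords with
 * OUT_RING(), then publish the new tail with ADVANCE_RING().
 */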
705
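/*
 * Each ring entry built below is a four-dword GUI-master descriptor, in the
 * same layout programmed by hand in mach64_bm_dma_test(): the aperture-relative
 * bus-master register offset, the system-memory address of the data, the byte
 * count ORed with the MACH64_DMA_HOLD_OFFSET/MACH64_DMA_EOL flags, and a
 * reserved dword that is always written as zero.
 */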
706 /**
707  * Queue a DMA buffer of register writes into the ring buffer.
708  */ 
709 int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
710                            drm_mach64_freelist_t *entry)
711 {
712         int bytes, pages, remainder;
713         u32 address, page;
714         int i;
715         struct drm_buf *buf = entry->buf;
716         RING_LOCALS;
717
718         bytes = buf->used;
719         address = GETBUFADDR( buf );
720         pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
721
722         BEGIN_RING( pages * 4 );
723
724         for ( i = 0 ; i < pages-1 ; i++ ) {
725                 page = address + i * MACH64_DMA_CHUNKSIZE;
726                 OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
727                 OUT_RING( page );
728                 OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
729                 OUT_RING( 0 );
730         }
731
732         /* generate the final descriptor for any remaining commands in this buffer */
733         page = address + i * MACH64_DMA_CHUNKSIZE;
734         remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
735
736         /* Save dword offset of last descriptor for this buffer.
737          * This is needed to check for completion of the buffer in freelist_get
738          */
739         entry->ring_ofs = RING_WRITE_OFS;
740
741         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
742         OUT_RING( page );
743         OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
744         OUT_RING( 0 );
745
746         ADVANCE_RING();
747         
748         return 0;
749 }
750
751 /**
752  * Queue a DMA buffer controlling host data transfers (e.g., blit).
753  * 
754  * Almost identical to mach64_add_buf_to_ring.
755  */
756 int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
757                                     drm_mach64_freelist_t *entry)
758 {
759         int bytes, pages, remainder;
760         u32 address, page;
761         int i;
762         struct drm_buf *buf = entry->buf;
763         RING_LOCALS;
764         
765         bytes = buf->used - MACH64_HOSTDATA_BLIT_OFFSET;
766         pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
767         address = GETBUFADDR( buf );
768         
769         BEGIN_RING( 4 + pages * 4 );
770         
771         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
772         OUT_RING( address );
773         OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET );
774         OUT_RING( 0 );
775         address += MACH64_HOSTDATA_BLIT_OFFSET;
776         
777         for ( i = 0 ; i < pages-1 ; i++ ) {
778                 page = address + i * MACH64_DMA_CHUNKSIZE;
779                 OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
780                 OUT_RING( page );
781                 OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
782                 OUT_RING( 0 );
783         }
784         
785         /* generate the final descriptor for any remaining commands in this buffer */
786         page = address + i * MACH64_DMA_CHUNKSIZE;
787         remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
788         
789         /* Save dword offset of last descriptor for this buffer.
790          * This is needed to check for completion of the buffer in freelist_get
791          */
792         entry->ring_ofs = RING_WRITE_OFS;
793         
794         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
795         OUT_RING( page );
796         OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
797         OUT_RING( 0 );
798         
799         ADVANCE_RING();
800         
801         return 0;
802 }
803
804 /*@}*/
805
806
807 /*******************************************************************/
808 /** \name DMA test and initialization */
809 /*@{*/
810
811 /**
812  * Perform a simple DMA operation using the pattern registers to test whether
813  * DMA works.
814  *
815  * \return zero if successful.
816  *
817  * \note This function was the testbed for many experiments with Mach64
818  * DMA operation. It is left here since it is so tricky to get DMA operating
819  * properly on some architectures and hardware.
820  */
821 static int mach64_bm_dma_test(struct drm_device * dev)
822 {
823         drm_mach64_private_t *dev_priv = dev->dev_private;
824         drm_dma_handle_t *cpu_addr_dmah;
825         u32 data_addr;
826         u32 *table, *data;
827         u32 expected[2];
828         u32 src_cntl, pat_reg0, pat_reg1;
829         int i, count, failed;
830
831         DRM_DEBUG("\n");
832
833         table = (u32 *) dev_priv->ring.start;
834
835         /* FIXME: get a dma buffer from the freelist here */
836         DRM_DEBUG("Allocating data memory ...\n");
837 #if defined(__FreeBSD__) || defined(__DragonFly__)
838         DRM_UNLOCK();
839 #endif
840         cpu_addr_dmah =
841             drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful);
842 #if defined(__FreeBSD__) || defined(__DragonFly__)
843         DRM_LOCK();
844 #endif
845         if (!cpu_addr_dmah) {
846                 DRM_INFO("data-memory allocation failed!\n");
847                 return -ENOMEM;
848         } else {
849                 data = (u32 *) cpu_addr_dmah->vaddr;
850                 data_addr = (u32) cpu_addr_dmah->busaddr;
851         }
852
853         /* Save the X server's value for SRC_CNTL and restore it
854          * in case our test fails.  This prevents the X server
855          * from disabling its cache for this register
856          */
857         src_cntl = MACH64_READ(MACH64_SRC_CNTL);
858         pat_reg0 = MACH64_READ(MACH64_PAT_REG0);
859         pat_reg1 = MACH64_READ(MACH64_PAT_REG1);
860
861         mach64_do_wait_for_fifo(dev_priv, 3);
862
863         MACH64_WRITE(MACH64_SRC_CNTL, 0);
864         MACH64_WRITE(MACH64_PAT_REG0, 0x11111111);
865         MACH64_WRITE(MACH64_PAT_REG1, 0x11111111);
866
867         mach64_do_wait_for_idle(dev_priv);
868
869         for (i = 0; i < 2; i++) {
870                 u32 reg;
871                 reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
872                 DRM_DEBUG("(Before DMA Transfer) reg %d = 0x%08x\n", i, reg);
873                 if (reg != 0x11111111) {
874                         DRM_INFO("Error initializing test registers\n");
875                         DRM_INFO("resetting engine ...\n");
876                         mach64_do_engine_reset(dev_priv);
877                         DRM_INFO("freeing data buffer memory.\n");
878                         drm_pci_free(dev, cpu_addr_dmah);
879                         return -EIO;
880                 }
881         }
882
883         /* fill up a buffer with sets of 2 consecutive writes starting with PAT_REG0 */
884         count = 0;
885
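        /* Each command dword encodes the target register (DMAREG()) in the
         * low 16 bits and the number of consecutive registers minus one in
         * the high 16 bits, as decoded by mach64_dump_buf_info() and
         * mach64_do_dispatch_pseudo_dma(): count = (reg >> 16) + 1.
         */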
886         data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
887         data[count++] = expected[0] = 0x22222222;
888         data[count++] = expected[1] = 0xaaaaaaaa;
889
890         while (count < 1020) {
891                 data[count++] =
892                     cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
893                 data[count++] = 0x22222222;
894                 data[count++] = 0xaaaaaaaa;
895         }
896         data[count++] = cpu_to_le32(DMAREG(MACH64_SRC_CNTL) | (0 << 16));
897         data[count++] = 0;
898
899         DRM_DEBUG("Preparing table ...\n");
900         table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(MACH64_BM_ADDR +
901                                                          MACH64_APERTURE_OFFSET);
902         table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(data_addr);
903         table[MACH64_DMA_COMMAND] = cpu_to_le32(count * sizeof(u32)
904                                                 | MACH64_DMA_HOLD_OFFSET
905                                                 | MACH64_DMA_EOL);
906         table[MACH64_DMA_RESERVED] = 0;
907
908         DRM_DEBUG("table[0] = 0x%08x\n", table[0]);
909         DRM_DEBUG("table[1] = 0x%08x\n", table[1]);
910         DRM_DEBUG("table[2] = 0x%08x\n", table[2]);
911         DRM_DEBUG("table[3] = 0x%08x\n", table[3]);
912
913         for (i = 0; i < 6; i++) {
914                 DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
915         }
916         DRM_DEBUG(" ...\n");
917         for (i = count - 5; i < count; i++) {
918                 DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
919         }
920
921         DRM_MEMORYBARRIER();
922
923         DRM_DEBUG("waiting for idle...\n");
924         if ((i = mach64_do_wait_for_idle(dev_priv))) {
925                 DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
926                 DRM_INFO("resetting engine ...\n");
927                 mach64_do_engine_reset(dev_priv);
928                 mach64_do_wait_for_fifo(dev_priv, 3);
929                 MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
930                 MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
931                 MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
932                 DRM_INFO("freeing data buffer memory.\n");
933                 drm_pci_free(dev, cpu_addr_dmah);
934                 return i;
935         }
936         DRM_DEBUG("waiting for idle...done\n");
937
938         DRM_DEBUG("BUS_CNTL = 0x%08x\n", MACH64_READ(MACH64_BUS_CNTL));
939         DRM_DEBUG("SRC_CNTL = 0x%08x\n", MACH64_READ(MACH64_SRC_CNTL));
940         DRM_DEBUG("\n");
941         DRM_DEBUG("data bus addr = 0x%08x\n", data_addr);
942         DRM_DEBUG("table bus addr = 0x%08x\n", dev_priv->ring.start_addr);
943
944         DRM_DEBUG("starting DMA transfer...\n");
945         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
946                      dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
947
948         MACH64_WRITE(MACH64_SRC_CNTL,
949                      MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
950                      MACH64_SRC_BM_OP_SYSTEM_TO_REG);
951
952         /* Kick off the transfer */
953         DRM_DEBUG("starting DMA transfer... done.\n");
954         MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
955
956         DRM_DEBUG("waiting for idle...\n");
957
958         if ((i = mach64_do_wait_for_idle(dev_priv))) {
959                 /* engine locked up, dump register state and reset */
960                 DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
961                 mach64_dump_engine_info(dev_priv);
962                 DRM_INFO("resetting engine ...\n");
963                 mach64_do_engine_reset(dev_priv);
964                 mach64_do_wait_for_fifo(dev_priv, 3);
965                 MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
966                 MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
967                 MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
968                 DRM_INFO("freeing data buffer memory.\n");
969                 drm_pci_free(dev, cpu_addr_dmah);
970                 return i;
971         }
972
973         DRM_DEBUG("waiting for idle...done\n");
974
975         /* restore SRC_CNTL */
976         mach64_do_wait_for_fifo(dev_priv, 1);
977         MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
978
979         failed = 0;
980
981         /* Check register values to see if the GUI master operation succeeded */
982         for (i = 0; i < 2; i++) {
983                 u32 reg;
984                 reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
985                 DRM_DEBUG("(After DMA Transfer) reg %d = 0x%08x\n", i, reg);
986                 if (reg != expected[i]) {
987                         failed = -1;
988                 }
989         }
990
991         /* restore pattern registers */
992         mach64_do_wait_for_fifo(dev_priv, 2);
993         MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
994         MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
995
996         DRM_DEBUG("freeing data buffer memory.\n");
997         drm_pci_free(dev, cpu_addr_dmah);
998         DRM_DEBUG("returning ...\n");
999
1000         return failed;
1001 }
1002
1003 /**
1004  * Called during the DMA initialization ioctl to initialize all the necessary
1005  * software and hardware state for DMA operation.
1006  */
1007 static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)
1008 {
1009         drm_mach64_private_t *dev_priv;
1010         u32 tmp;
1011         int i, ret;
1012
1013         DRM_DEBUG("\n");
1014
1015         dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER);
1016         if (dev_priv == NULL)
1017                 return -ENOMEM;
1018
1019         memset(dev_priv, 0, sizeof(drm_mach64_private_t));
1020
1021         dev_priv->is_pci = init->is_pci;
1022
1023         dev_priv->fb_bpp = init->fb_bpp;
1024         dev_priv->front_offset = init->front_offset;
1025         dev_priv->front_pitch = init->front_pitch;
1026         dev_priv->back_offset = init->back_offset;
1027         dev_priv->back_pitch = init->back_pitch;
1028
1029         dev_priv->depth_bpp = init->depth_bpp;
1030         dev_priv->depth_offset = init->depth_offset;
1031         dev_priv->depth_pitch = init->depth_pitch;
1032
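        /* Pack offset/pitch pairs for the accelerator: pitch / 8 goes in the
         * upper bits (shifted left by 22) and offset / 8 in the low bits.
         */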
1033         dev_priv->front_offset_pitch = (((dev_priv->front_pitch / 8) << 22) |
1034                                         (dev_priv->front_offset >> 3));
1035         dev_priv->back_offset_pitch = (((dev_priv->back_pitch / 8) << 22) |
1036                                        (dev_priv->back_offset >> 3));
1037         dev_priv->depth_offset_pitch = (((dev_priv->depth_pitch / 8) << 22) |
1038                                         (dev_priv->depth_offset >> 3));
1039
1040         dev_priv->usec_timeout = 1000000;
1041
1042         /* Set up the freelist, placeholder list and pending list */
1043         INIT_LIST_HEAD(&dev_priv->free_list);
1044         INIT_LIST_HEAD(&dev_priv->placeholders);
1045         INIT_LIST_HEAD(&dev_priv->pending);
1046
1047         dev_priv->sarea = drm_getsarea(dev);
1048         if (!dev_priv->sarea) {
1049                 DRM_ERROR("can not find sarea!\n");
1050                 dev->dev_private = (void *)dev_priv;
1051                 mach64_do_cleanup_dma(dev);
1052                 return -EINVAL;
1053         }
1054         dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
1055         if (!dev_priv->fb) {
1056                 DRM_ERROR("can not find frame buffer map!\n");
1057                 dev->dev_private = (void *)dev_priv;
1058                 mach64_do_cleanup_dma(dev);
1059                 return -EINVAL;
1060         }
1061         dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
1062         if (!dev_priv->mmio) {
1063                 DRM_ERROR("can not find mmio map!\n");
1064                 dev->dev_private = (void *)dev_priv;
1065                 mach64_do_cleanup_dma(dev);
1066                 return -EINVAL;
1067         }
1068
1069         dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
1070         if (!dev_priv->ring_map) {
1071                 DRM_ERROR("can not find ring map!\n");
1072                 dev->dev_private = (void *)dev_priv;
1073                 mach64_do_cleanup_dma(dev);
1074                 return -EINVAL;
1075         }
1076
1077         dev_priv->sarea_priv = (drm_mach64_sarea_t *)
1078             ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
1079
1080         if (!dev_priv->is_pci) {
1081                 drm_core_ioremap(dev_priv->ring_map, dev);
1082                 if (!dev_priv->ring_map->handle) {
1083                         DRM_ERROR("can not ioremap virtual address for"
1084                                   " descriptor ring\n");
1085                         dev->dev_private = (void *)dev_priv;
1086                         mach64_do_cleanup_dma(dev);
1087                         return -ENOMEM;
1088                 }
1089                 dev->agp_buffer_token = init->buffers_offset;
1090                 dev->agp_buffer_map =
1091                     drm_core_findmap(dev, init->buffers_offset);
1092                 if (!dev->agp_buffer_map) {
1093                         DRM_ERROR("can not find dma buffer map!\n");
1094                         dev->dev_private = (void *)dev_priv;
1095                         mach64_do_cleanup_dma(dev);
1096                         return -EINVAL;
1097                 }
1098                 /* there might be a nicer way to do this -
1099                    dev isn't passed all the way through the mach64 driver - DA */
1100                 dev_priv->dev_buffers = dev->agp_buffer_map;
1101
1102                 drm_core_ioremap(dev->agp_buffer_map, dev);
1103                 if (!dev->agp_buffer_map->handle) {
1104                         DRM_ERROR("can not ioremap virtual address for"
1105                                   " dma buffer\n");
1106                         dev->dev_private = (void *)dev_priv;
1107                         mach64_do_cleanup_dma(dev);
1108                         return -ENOMEM;
1109                 }
1110                 dev_priv->agp_textures =
1111                     drm_core_findmap(dev, init->agp_textures_offset);
1112                 if (!dev_priv->agp_textures) {
1113                         DRM_ERROR("can not find agp texture region!\n");
1114                         dev->dev_private = (void *)dev_priv;
1115                         mach64_do_cleanup_dma(dev);
1116                         return -EINVAL;
1117                 }
1118         }
1119
1120         dev->dev_private = (void *)dev_priv;
1121
1122         dev_priv->driver_mode = init->dma_mode;
1123
1124         /* changing the FIFO size from the default causes problems with DMA */
1125         tmp = MACH64_READ(MACH64_GUI_CNTL);
1126         if ((tmp & MACH64_CMDFIFO_SIZE_MASK) != MACH64_CMDFIFO_SIZE_128) {
1127                 DRM_INFO("Setting FIFO size to 128 entries\n");
1128                 /* FIFO must be empty to change the FIFO depth */
1129                 if ((ret = mach64_do_wait_for_idle(dev_priv))) {
1130                         DRM_ERROR
1131                             ("wait for idle failed before changing FIFO depth!\n");
1132                         mach64_do_cleanup_dma(dev);
1133                         return ret;
1134                 }
1135                 MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK)
1136                                                | MACH64_CMDFIFO_SIZE_128));
1137                 /* need to read GUI_STAT for proper sync according to docs */
1138                 if ((ret = mach64_do_wait_for_idle(dev_priv))) {
1139                         DRM_ERROR
1140                             ("wait for idle failed when changing FIFO depth!\n");
1141                         mach64_do_cleanup_dma(dev);
1142                         return ret;
1143                 }
1144         }
1145
1146         dev_priv->ring.size = 0x4000;   /* 16KB */
1147         dev_priv->ring.start = dev_priv->ring_map->handle;
1148         dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
1149
1150         memset(dev_priv->ring.start, 0, dev_priv->ring.size);
1151         DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n",
1152                  dev_priv->ring.start, dev_priv->ring.start_addr);
1153
1154         ret = 0;
1155         if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
1156
1157                 /* enable block 1 registers and bus mastering */
1158                 MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL)
1159                                                 | MACH64_BUS_EXT_REG_EN)
1160                                                & ~MACH64_BUS_MASTER_DIS));
1161
1162                 /* try a DMA GUI-mastering pass and fall back to MMIO if it fails */
1163                 DRM_DEBUG("Starting DMA test...\n");
1164                 if ((ret = mach64_bm_dma_test(dev))) {
1165                         dev_priv->driver_mode = MACH64_MODE_MMIO;
1166                 }
1167         }
1168
1169         switch (dev_priv->driver_mode) {
1170         case MACH64_MODE_MMIO:
1171                 MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL)
1172                                                | MACH64_BUS_EXT_REG_EN
1173                                                | MACH64_BUS_MASTER_DIS));
1174                 if (init->dma_mode == MACH64_MODE_MMIO)
1175                         DRM_INFO("Forcing pseudo-DMA mode\n");
1176                 else
1177                         DRM_INFO
1178                             ("DMA test failed (ret=%d), using pseudo-DMA mode\n",
1179                              ret);
1180                 break;
1181         case MACH64_MODE_DMA_SYNC:
1182                 DRM_INFO("DMA test succeeded, using synchronous DMA mode\n");
1183                 break;
1184         case MACH64_MODE_DMA_ASYNC:
1185         default:
1186                 DRM_INFO("DMA test succeeded, using asynchronous DMA mode\n");
1187         }
1188
1189         dev_priv->ring_running = 0;
1190
1191         /* setup offsets for physical address of table start and end */
1192         dev_priv->ring.head_addr = dev_priv->ring.start_addr;
1193         dev_priv->ring.head = dev_priv->ring.tail = 0;
1194         dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
1195         dev_priv->ring.space = dev_priv->ring.size;
1196
1197         /* setup physical address and size of descriptor table */
1198         mach64_do_wait_for_fifo(dev_priv, 1);
1199         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
1200                      (dev_priv->ring.
1201                       head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB));
1202
1203         /* init frame counter */
1204         dev_priv->sarea_priv->frames_queued = 0;
1205         for (i = 0; i < MACH64_MAX_QUEUED_FRAMES; i++) {
1206                 dev_priv->frame_ofs[i] = ~0;    /* All ones indicates placeholder */
1207         }
1208
1209         /* Allocate the DMA buffer freelist */
1210         if ((ret = mach64_init_freelist(dev))) {
1211                 DRM_ERROR("Freelist allocation failed\n");
1212                 mach64_do_cleanup_dma(dev);
1213                 return ret;
1214         }
1215
1216         return 0;
1217 }
1218
1219 /*******************************************************************/
1220 /** MMIO Pseudo-DMA (intended primarily for debugging, not performance)
1221  */
1222
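/*
 * Walk the descriptor ring in software and hand-feed each queued buffer's
 * register/data pairs to the card through the register aperture, emulating
 * what the bus-master engine would otherwise do on its own.
 */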
1223 int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t *dev_priv)
1224 {
1225         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
1226         volatile u32 *ring_read;
1227         struct list_head *ptr;
1228         drm_mach64_freelist_t *entry;
1229         struct drm_buf *buf = NULL;
1230         u32 *buf_ptr;
1231         u32 used, reg, target;
1232         int fifo, count, found, ret, no_idle_wait;
1233
1234         fifo = count = reg = no_idle_wait = 0;
1235         target = MACH64_BM_ADDR;
1236
1237         if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
1238                 DRM_INFO("idle failed before pseudo-dma dispatch, resetting engine\n");
1239                 mach64_dump_engine_info(dev_priv);
1240                 mach64_do_engine_reset(dev_priv);
1241                 return ret;
1242         }
1243
1244         ring_read = (u32 *) ring->start;
1245
1246         while (ring->tail != ring->head) {
1247                 u32 buf_addr, new_target, offset;
1248                 u32 bytes, remaining, head, eol;
1249
1250                 head = ring->head;
1251
1252                 new_target =
1253                     le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET;
1254                 buf_addr = le32_to_cpu(ring_read[head++]);
1255                 eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL;
1256                 bytes = le32_to_cpu(ring_read[head++])
1257                     & ~(MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
1258                 head++;
1259                 head &= ring->tail_mask;
1260
1261                 /* can't wait for idle between a blit setup descriptor
1262                  * and a HOSTDATA descriptor or the engine will lock
1263                  */
1264                 if (new_target == MACH64_BM_HOSTDATA
1265                     && target == MACH64_BM_ADDR)
1266                         no_idle_wait = 1;
1267
1268                 target = new_target;
1269
1270                 found = 0;
1271                 offset = 0;
1272                 list_for_each(ptr, &dev_priv->pending) {
1273                         entry = list_entry(ptr, drm_mach64_freelist_t, list);
1274                         buf = entry->buf;
1275                         offset = buf_addr - GETBUFADDR(buf);
1276                         if (offset >= 0 && offset < MACH64_BUFFER_SIZE) {
1277                                 found = 1;
1278                                 break;
1279                         }
1280                 }
1281
1282                 if (!found || buf == NULL) {
1283                         DRM_ERROR
1284                             ("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n",
1285                              head, ring->tail, buf_addr, (eol ? "eol" : ""));
1286                         mach64_dump_ring_info(dev_priv);
1287                         mach64_do_engine_reset(dev_priv);
1288                         return -EINVAL;
1289                 }
1290
1291                 /* Hand feed the buffer to the card via MMIO, waiting for the fifo
1292                  * every 16 writes
1293                  */
1294                 DRM_DEBUG("target: (0x%08x) %s\n", target,
1295                           (target ==
1296                            MACH64_BM_HOSTDATA ? "BM_HOSTDATA" : "BM_ADDR"));
1297                 DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes,
1298                           buf->used);
1299
1300                 remaining = (buf->used - offset) >> 2;  /* dwords remaining in buffer */
1301                 used = bytes >> 2;      /* dwords in buffer for this descriptor */
1302                 buf_ptr = (u32 *) ((char *)GETBUFPTR(buf) + offset);
1303
1304                 while (used) {
1305
1306                         if (count == 0) {
1307                                 if (target == MACH64_BM_HOSTDATA) {
1308                                         reg = DMAREG(MACH64_HOST_DATA0);
1309                                         count =
1310                                             (remaining > 16) ? 16 : remaining;
1311                                         fifo = 0;
1312                                 } else {
1313                                         reg = le32_to_cpu(*buf_ptr++);
1314                                         used--;
1315                                         count = (reg >> 16) + 1;
1316                                 }
1317
1318                                 reg = reg & 0xffff;
1319                                 reg = MMSELECT(reg);
1320                         }
1321                         while (count && used) {
1322                                 if (!fifo) {
1323                                         if (no_idle_wait) {
1324                                                 if ((ret =
1325                                                      mach64_do_wait_for_fifo
1326                                                      (dev_priv, 16)) < 0) {
1327                                                         no_idle_wait = 0;
1328                                                         return ret;
1329                                                 }
1330                                         } else {
1331                                                 if ((ret =
1332                                                      mach64_do_wait_for_idle
1333                                                      (dev_priv)) < 0) {
1334                                                         return ret;
1335                                                 }
1336                                         }
1337                                         fifo = 16;
1338                                 }
1339                                 --fifo;
1340                                 MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++));
1341                                 used--;
1342                                 remaining--;
1343
1344                                 reg += 4;
1345                                 count--;
1346                         }
1347                 }
1348                 ring->head = head;
1349                 ring->head_addr = ring->start_addr + (ring->head * sizeof(u32));
1350                 ring->space += (4 * sizeof(u32));
1351         }
1352
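        /* The whole ring has been fed to the engine by hand; once it goes
         * idle, point BM_GUI_TABLE_CMD at the new head so the hardware's
         * copy of the ring position matches ring->head.
         */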
1353         if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
1354                 return ret;
1355         }
1356         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
1357                      ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
1358
1359         DRM_DEBUG("completed\n");
1360         return 0;
1361 }
1362
1363 /*@}*/
1364
1365
1366 /*******************************************************************/
1367 /** \name DMA cleanup */
1368 /*@{*/
1369
1370 int mach64_do_cleanup_dma(struct drm_device * dev)
1371 {
1372         DRM_DEBUG("\n");
1373
1374         /* Make sure interrupts are disabled here, because the uninstall
1375          * ioctl may not have been called from userspace; once dev_private
1376          * is freed, it is too late to disable them.
1377          */
1378         if (dev->irq)
1379                 drm_irq_uninstall(dev);
1380
1381         if (dev->dev_private) {
1382                 drm_mach64_private_t *dev_priv = dev->dev_private;
1383
1384                 if (!dev_priv->is_pci) {
1385                         if (dev_priv->ring_map)
1386                                 drm_core_ioremapfree(dev_priv->ring_map, dev);
1387
1388                         if (dev->agp_buffer_map) {
1389                                 drm_core_ioremapfree(dev->agp_buffer_map, dev);
1390                                 dev->agp_buffer_map = NULL;
1391                         }
1392                 }
1393
1394                 mach64_destroy_freelist(dev);
1395
1396                 drm_free(dev_priv, sizeof(drm_mach64_private_t),
1397                          DRM_MEM_DRIVER);
1398                 dev->dev_private = NULL;
1399         }
1400
1401         return 0;
1402 }
1403
1404 /*@}*/
1405
1406
1407 /*******************************************************************/
1408 /** \name IOCTL handlers */
1409 /*@{*/
1410
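/**
 * DMA initialization ioctl: dispatches to mach64_do_dma_init() or
 * mach64_do_cleanup_dma() depending on the requested function.
 */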
1411 int mach64_dma_init(struct drm_device *dev, void *data,
1412                     struct drm_file *file_priv)
1413 {
1414         drm_mach64_init_t *init = data;
1415
1416         DRM_DEBUG("\n");
1417
1418         LOCK_TEST_WITH_RETURN(dev, file_priv);
1419
1420         switch (init->func) {
1421         case DRM_MACH64_INIT_DMA:
1422                 return mach64_do_dma_init(dev, init);
1423         case DRM_MACH64_CLEANUP_DMA:
1424                 return mach64_do_cleanup_dma(dev);
1425         }
1426
1427         return -EINVAL;
1428 }
1429
1430 int mach64_dma_idle(struct drm_device *dev, void *data,
1431                     struct drm_file *file_priv)
1432 {
1433         drm_mach64_private_t *dev_priv = dev->dev_private;
1434
1435         DRM_DEBUG("\n");
1436
1437         LOCK_TEST_WITH_RETURN(dev, file_priv);
1438
1439         return mach64_do_dma_idle(dev_priv);
1440 }
1441
1442 int mach64_dma_flush(struct drm_device *dev, void *data,
1443                      struct drm_file *file_priv)
1444 {
1445         drm_mach64_private_t *dev_priv = dev->dev_private;
1446
1447         DRM_DEBUG("\n");
1448
1449         LOCK_TEST_WITH_RETURN(dev, file_priv);
1450
1451         return mach64_do_dma_flush(dev_priv);
1452 }
1453
1454 int mach64_engine_reset(struct drm_device *dev, void *data,
1455                         struct drm_file *file_priv)
1456 {
1457         drm_mach64_private_t *dev_priv = dev->dev_private;
1458
1459         DRM_DEBUG("\n");
1460
1461         LOCK_TEST_WITH_RETURN(dev, file_priv);
1462
1463         return mach64_do_engine_reset(dev_priv);
1464 }
1465
1466 /*@}*/
1467
1468
1469 /*******************************************************************/
1470 /** \name Freelist management */
1471 /*@{*/
1472
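/**
 * Allocate a freelist entry for each DMA buffer and add them all to the
 * free list.
 */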
1473 int mach64_init_freelist(struct drm_device * dev)
1474 {
1475         struct drm_device_dma *dma = dev->dma;
1476         drm_mach64_private_t *dev_priv = dev->dev_private;
1477         drm_mach64_freelist_t *entry;
1478         struct list_head *ptr;
1479         int i;
1480
1481         DRM_DEBUG("adding %d buffers to freelist\n", dma->buf_count);
1482
1483         for (i = 0; i < dma->buf_count; i++) {
1484                 entry = (drm_mach64_freelist_t *)
1485                     drm_alloc(sizeof(drm_mach64_freelist_t),
1486                               DRM_MEM_BUFLISTS);
1487                 if (entry == NULL)
1488                         return -ENOMEM;
1489                 memset(entry, 0, sizeof(drm_mach64_freelist_t));
1490                 entry->buf = dma->buflist[i];
1491                 ptr = &entry->list;
1492                 list_add_tail(ptr, &dev_priv->free_list);
1493         }
1494
1495         return 0;
1496 }
1497
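/**
 * Free the freelist entries on the pending, placeholder and free lists.
 * Only the list entries are released here, not the DMA buffers they
 * reference.
 */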
1498 void mach64_destroy_freelist(struct drm_device * dev)
1499 {
1500         drm_mach64_private_t *dev_priv = dev->dev_private;
1501         drm_mach64_freelist_t *entry;
1502         struct list_head *ptr;
1503         struct list_head *tmp;
1504
1505         DRM_DEBUG("\n");
1506
1507         list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1508                 list_del(ptr);
1509                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1510                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1511         }
1512         list_for_each_safe(ptr, tmp, &dev_priv->placeholders) {
1513                 list_del(ptr);
1514                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1515                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1516         }
1517
1518         list_for_each_safe(ptr, tmp, &dev_priv->free_list) {
1519                 list_del(ptr);
1520                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1521                 drm_free(entry, sizeof(*entry), DRM_MEM_BUFLISTS);
1522         }
1523 }
1524
1525 /* IMPORTANT: This function should only be called when the engine is idle or
1526  * locked up, as it assumes all buffers in the pending list have been
1527  * completed by the hardware. */
1528 int mach64_do_release_used_buffers(drm_mach64_private_t *dev_priv)
1529 {
1530         struct list_head *ptr;
1531         struct list_head *tmp;
1532         drm_mach64_freelist_t *entry;
1533         int i;
1534
1535         if (list_empty(&dev_priv->pending))
1536                 return 0;
1537
1538         /* Move the discarded buffers from the pending list back to the freelist. */
1539         i = 0;
1540         list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1541                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1542                 if (entry->discard) {
1543                         entry->buf->pending = 0;
1544                         list_del(ptr);
1545                         list_add_tail(ptr, &dev_priv->free_list);
1546                         i++;
1547                 }
1548         }
1549
1550         DRM_DEBUG("released %d buffers from pending list\n", i);
1551
1552         return 0;
1553 }
1554
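/**
 * Try to move a completed buffer from the pending list to the free list.
 *
 * If the ring has been fully consumed the engine should be idle, so all
 * pending buffers are released at once.  Otherwise the pending list is
 * scanned for a discarded buffer whose ring offset falls outside the span
 * between head and tail that the hardware has yet to process.
 *
 * \returns 0 if at least one buffer was moved to the free list, 1 if no
 * completed buffer was found, or -1 on an inconsistency.
 */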
1555 static int mach64_do_reclaim_completed(drm_mach64_private_t *dev_priv)
1556 {
1557         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
1558         struct list_head *ptr;
1559         struct list_head *tmp;
1560         drm_mach64_freelist_t *entry;
1561         u32 head, tail, ofs;
1562
1563         mach64_ring_tick(dev_priv, ring);
1564         head = ring->head;
1565         tail = ring->tail;
1566
1567         if (head == tail) {
1568 #if MACH64_EXTRA_CHECKING
1569                 if (MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE) {
1570                         DRM_ERROR("Empty ring with non-idle engine!\n");
1571                         mach64_dump_ring_info(dev_priv);
1572                         return -1;
1573                 }
1574 #endif
1575                 /* last pass is complete, so release everything */
1576                 mach64_do_release_used_buffers(dev_priv);
1577                 DRM_DEBUG("idle engine, freed all buffers.\n");
1578                 if (list_empty(&dev_priv->free_list)) {
1579                         DRM_ERROR("Freelist empty with idle engine\n");
1580                         return -1;
1581                 }
1582                 return 0;
1583         }
1584         /* Look for a completed buffer and bail out of the loop
1585          * as soon as we find one -- don't waste time trying to free
1586          * extra buffers here; leave that to mach64_do_release_used_buffers()
1587          */
1588         list_for_each_safe(ptr, tmp, &dev_priv->pending) {
1589                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1590                 ofs = entry->ring_ofs;
1591                 if (entry->discard &&
1592                     ((head < tail && (ofs < head || ofs >= tail)) ||
1593                      (head > tail && (ofs < head && ofs >= tail)))) {
1594 #if MACH64_EXTRA_CHECKING
1595                         int i;
1596
1597                         for (i = head; i != tail; i = (i + 4) & ring->tail_mask)
1598                         {
1599                                 u32 o1 = le32_to_cpu(
1600                                     ((u32 *) ring->start)[i + 1]);
1601                                 u32 o2 = GETBUFADDR(entry->buf);
1602
1603                                 if (o1 == o2) {
1604                                         DRM_ERROR
1605                                             ("Attempting to free used buffer: "
1606                                              "i=%d  buf=0x%08x\n",
1607                                              i, o1);
1608                                         mach64_dump_ring_info(dev_priv);
1609                                         return -1;
1610                                 }
1611                         }
1612 #endif
1613                         /* found a processed buffer */
1614                         entry->buf->pending = 0;
1615                         list_del(ptr);
1616                         list_add_tail(ptr, &dev_priv->free_list);
1617                         DRM_DEBUG
1618                             ("freed processed buffer (head=%d tail=%d "
1619                              "buf ring ofs=%d).\n",
1620                              head, tail, ofs);
1621                         return 0;
1622                 }
1623         }
1624
1625         return 1;
1626 }
1627
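/**
 * Take a buffer off the free list.
 *
 * If the free list is empty, poll mach64_do_reclaim_completed() for up to
 * usec_timeout microseconds to recover a buffer the hardware has finished
 * with.  The chosen entry is parked on the placeholder list; an entry is
 * moved back from there to the free list by mach64_freelist_put().
 */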
1628 struct drm_buf *mach64_freelist_get(drm_mach64_private_t *dev_priv)
1629 {
1630         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
1631         drm_mach64_freelist_t *entry;
1632         struct list_head *ptr;
1633         int t;
1634
1635         if (list_empty(&dev_priv->free_list)) {
1636                 if (list_empty(&dev_priv->pending)) {
1637                         DRM_ERROR
1638                             ("Couldn't get buffer - pending and free lists empty\n");
1639                         t = 0;
1640                         list_for_each(ptr, &dev_priv->placeholders) {
1641                                 t++;
1642                         }
1643                         DRM_INFO("Placeholders: %d\n", t);
1644                         return NULL;
1645                 }
1646
1647                 for (t = 0; t < dev_priv->usec_timeout; t++) {
1648                         int ret;
1649
1650                         ret = mach64_do_reclaim_completed(dev_priv);
1651                         if (ret == 0)
1652                                 goto _freelist_entry_found;
1653                         if (ret < 0)
1654                                 return NULL;
1655
1656                         DRM_UDELAY(1);
1657                 }
1658                 mach64_dump_ring_info(dev_priv);
1659                 DRM_ERROR
1660                     ("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n",
1661                      ring->head_addr, ring->head, ring->tail);
1662                 return NULL;
1663         }
1664
1665       _freelist_entry_found:
1666         ptr = dev_priv->free_list.next;
1667         list_del(ptr);
1668         entry = list_entry(ptr, drm_mach64_freelist_t, list);
1669         entry->buf->used = 0;
1670         list_add_tail(ptr, &dev_priv->placeholders);
1671         return entry->buf;
1672 }
1673
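/**
 * Return an unused buffer to the free list: take an entry from the
 * placeholder list, attach copy_buf to it and mark it discardable.
 */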
1674 int mach64_freelist_put(drm_mach64_private_t *dev_priv, struct drm_buf *copy_buf)
1675 {
1676         struct list_head *ptr;
1677         drm_mach64_freelist_t *entry;
1678
1679 #if MACH64_EXTRA_CHECKING
1680         list_for_each(ptr, &dev_priv->pending) {
1681                 entry = list_entry(ptr, drm_mach64_freelist_t, list);
1682                 if (copy_buf == entry->buf) {
1683                         DRM_ERROR("Trying to release a pending buf\n");
1684                         return -EFAULT;
1685                 }
1686         }
1687 #endif
1688         ptr = dev_priv->placeholders.next;
1689         entry = list_entry(ptr, drm_mach64_freelist_t, list);
1690         copy_buf->pending = 0;
1691         copy_buf->used = 0;
1692         entry->buf = copy_buf;
1693         entry->discard = 1;
1694         list_del(ptr);
1695         list_add_tail(ptr, &dev_priv->free_list);
1696
1697         return 0;
1698 }
1699
1700 /*@}*/
1701
1702
1703 /*******************************************************************/
1704 /** \name DMA buffer request and submission IOCTL handler */
1705 /*@{*/
1706
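/**
 * Hand out buffers from the free list until request_count buffers have been
 * granted, copying each buffer's index and size back to the user's request
 * arrays.
 */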
1707 static int mach64_dma_get_buffers(struct drm_device *dev,
1708                                   struct drm_file *file_priv,
1709                                   struct drm_dma * d)
1710 {
1711         int i;
1712         struct drm_buf *buf;
1713         drm_mach64_private_t *dev_priv = dev->dev_private;
1714
1715         for (i = d->granted_count; i < d->request_count; i++) {
1716                 buf = mach64_freelist_get(dev_priv);
1717 #if MACH64_EXTRA_CHECKING
1718                 if (!buf)
1719                         return -EFAULT;
1720 #else
1721                 if (!buf)
1722                         return -EAGAIN;
1723 #endif
1724
1725                 buf->file_priv = file_priv;
1726
1727                 if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
1728                                      sizeof(buf->idx)))
1729                         return -EFAULT;
1730                 if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
1731                                      sizeof(buf->total)))
1732                         return -EFAULT;
1733
1734                 d->granted_count++;
1735         }
1736         return 0;
1737 }
1738
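/**
 * DMA buffer request ioctl: refuses to accept buffers from userspace and
 * grants up to request_count buffers through mach64_dma_get_buffers().
 */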
1739 int mach64_dma_buffers(struct drm_device *dev, void *data,
1740                        struct drm_file *file_priv)
1741 {
1742         struct drm_device_dma *dma = dev->dma;
1743         struct drm_dma *d = data;
1744         int ret = 0;
1745
1746         LOCK_TEST_WITH_RETURN(dev, file_priv);
1747
1748         /* Please don't send us buffers.
1749          */
1750         if (d->send_count != 0) {
1751                 DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
1752                           DRM_CURRENTPID, d->send_count);
1753                 return -EINVAL;
1754         }
1755
1756         /* We'll send you buffers.
1757          */
1758         if (d->request_count < 0 || d->request_count > dma->buf_count) {
1759                 DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
1760                           DRM_CURRENTPID, d->request_count, dma->buf_count);
1761                 return -EINVAL;
1762         }
1763
1764         d->granted_count = 0;
1765
1766         if (d->request_count) {
1767                 ret = mach64_dma_get_buffers(dev, file_priv, d);
1768         }
1769
1770         return ret;
1771 }
1772
1773 void mach64_driver_lastclose(struct drm_device * dev)
1774 {
1775         mach64_do_cleanup_dma(dev);
1776 }
1777
1778 /*@}*/