1 /* mach64_dma.c -- DMA support for mach64 (Rage Pro) driver -*- linux-c -*- */
2 /**
3  * \file mach64_dma.c
4  * DMA support for mach64 (Rage Pro) driver
5  *
6  * \author Gareth Hughes <gareth@valinux.com>
7  * \author Frank C. Earl <fearl@airmail.net>
8  * \author Leif Delgass <ldelgass@retinalburn.net>
9  * \author José Fonseca <j_r_fonseca@yahoo.co.uk>
10  */
11
12 /*-
13  * Copyright 2000 Gareth Hughes
14  * Copyright 2002 Frank C. Earl
15  * Copyright 2002-2003 Leif Delgass
16  * All Rights Reserved.
17  *
18  * Permission is hereby granted, free of charge, to any person obtaining a
19  * copy of this software and associated documentation files (the "Software"),
20  * to deal in the Software without restriction, including without limitation
21  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
22  * and/or sell copies of the Software, and to permit persons to whom the
23  * Software is furnished to do so, subject to the following conditions:
24  *
25  * The above copyright notice and this permission notice (including the next
26  * paragraph) shall be included in all copies or substantial portions of the
27  * Software.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
32  * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
33  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
35  */
36
37 #include <drm/drmP.h>
38 #include "mach64_drm.h"
39 #include "mach64_drv.h"
40
41 /*******************************************************************/
42 /** \name Engine, FIFO control */
43 /*@{*/
44
45 /**
46  * Waits for free entries in the FIFO.
47  *
48  * \note Most writes to Mach64 registers are automatically routed through
49  * the command FIFO, which is 16 entries deep. Prior to writing to any draw
50  * engine register, one has to ensure that enough FIFO entries are available
51  * by calling this function.  Failure to do so may cause the engine to lock.
52  *
53  * \param dev_priv pointer to device private data structure.
54  * \param entries number of free entries in the FIFO to wait for.
55  *
56  * \returns zero on success, or -EBUSY if the timeout (specified by
57  * drm_mach64_private::usec_timeout) occurs.
58  */
59 int mach64_do_wait_for_fifo(drm_mach64_private_t *dev_priv, int entries)
60 {
61         int slots = 0, i;
62
63         for (i = 0; i < dev_priv->usec_timeout; i++) {
64                 slots = (MACH64_READ(MACH64_FIFO_STAT) & MACH64_FIFO_SLOT_MASK);
65                 if (slots <= (0x8000 >> entries))
66                         return 0;
67                 DRM_UDELAY(1);
68         }
69
70         DRM_INFO("failed! slots=%d entries=%d\n", slots, entries);
71         return -EBUSY;
72 }
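/*
 * Illustrative sketch, not part of the original driver: the usual pattern for
 * the protocol described above is to reserve as many FIFO slots as registers
 * you are about to write.  The registers chosen here are arbitrary and only
 * demonstrate the call sequence.
 */
static __inline__ int mach64_example_fifo_writes(drm_mach64_private_t *dev_priv)
{
        int ret;

        /* Reserve two FIFO entries for the two register writes below. */
        ret = mach64_do_wait_for_fifo(dev_priv, 2);
        if (ret < 0)
                return ret;

        MACH64_WRITE(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch);
        MACH64_WRITE(MACH64_DP_WRITE_MASK, 0xffffffff);

        return 0;
}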
73
74 /**
75  * Wait for the draw engine to be idle.
76  */
77 int mach64_do_wait_for_idle(drm_mach64_private_t *dev_priv)
78 {
79         int i, ret;
80
81         ret = mach64_do_wait_for_fifo(dev_priv, 16);
82         if (ret < 0)
83                 return ret;
84
85         for (i = 0; i < dev_priv->usec_timeout; i++) {
86                 if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE))
87                         return 0;
88                 DRM_UDELAY(1);
89         }
90
91         DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
92         mach64_dump_ring_info(dev_priv);
93         return -EBUSY;
94 }
95
96 /**
97  * Wait for free entries in the ring buffer.
98  *
99  * The Mach64 bus master can be configured to act as a virtual FIFO, using a
100  * circular buffer (commonly referred to as a "ring buffer" in other drivers) with
101  * pointers to engine commands. This allows the CPU to do other things while
102  * the graphics engine is busy, i.e., DMA mode.
103  *
104  * This function should be called before writing new entries to the ring
105  * buffer.
106  *
107  * \param dev_priv pointer to device private data structure.
108  * \param n number of bytes of free space in the ring buffer to wait for.
109  *
110  * \returns zero on success, or -EBUSY if the timeout (specified by
111  * drm_mach64_private_t::usec_timeout) occurs.
112  *
113  * \sa mach64_dump_ring_info()
114  */
115 int mach64_wait_ring(drm_mach64_private_t *dev_priv, int n)
116 {
117         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
118         int i;
119
120         for (i = 0; i < dev_priv->usec_timeout; i++) {
121                 mach64_update_ring_snapshot(dev_priv);
122                 if (ring->space >= n) {
123                         if (i > 0)
124                                 DRM_DEBUG("%d usecs\n", i);
125                         return 0;
126                 }
127                 DRM_UDELAY(1);
128         }
129
130         /* FIXME: This is being ignored... */
131         DRM_ERROR("failed!\n");
132         mach64_dump_ring_info(dev_priv);
133         return -EBUSY;
134 }
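/*
 * Illustrative sketch, not part of the original driver: a caller that wants
 * to queue a single 4-dword bus-master descriptor by hand would first wait
 * for 16 bytes of ring space.  The BEGIN_RING()/OUT_RING()/ADVANCE_RING()
 * macros defined later in this file wrap this pattern.
 */
static __inline__ int mach64_example_reserve_ring_space(drm_mach64_private_t *dev_priv)
{
        /* One descriptor occupies four 32-bit ring entries. */
        return mach64_wait_ring(dev_priv, 4 * sizeof(u32));
}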
135
136 /**
137  * Wait until all DMA requests have been processed...
138  *
139  * \sa mach64_wait_ring()
140  */
141 static int mach64_ring_idle(drm_mach64_private_t *dev_priv)
142 {
143         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
144         u32 head;
145         int i;
146
147         head = ring->head;
148         i = 0;
149         while (i < dev_priv->usec_timeout) {
150                 mach64_update_ring_snapshot(dev_priv);
151                 if (ring->head == ring->tail &&
152                     !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) {
153                         if (i > 0)
154                                 DRM_DEBUG("%d usecs\n", i);
155                         return 0;
156                 }
157                 if (ring->head == head) {
158                         ++i;
159                 } else {
160                         head = ring->head;
161                         i = 0;
162                 }
163                 DRM_UDELAY(1);
164         }
165
166         DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));
167         mach64_dump_ring_info(dev_priv);
168         return -EBUSY;
169 }
170
171 /**
172  * Reset the ring buffer descriptors.
173  *
174  * \sa mach64_do_engine_reset()
175  */
176 static void mach64_ring_reset(drm_mach64_private_t *dev_priv)
177 {
178         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
179
180         mach64_do_release_used_buffers(dev_priv);
181         ring->head_addr = ring->start_addr;
182         ring->head = ring->tail = 0;
183         ring->space = ring->size;
184
185         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
186                      ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
187
188         dev_priv->ring_running = 0;
189 }
190
191 /**
192  * Ensure that all the queued commands will be processed.
193  */
194 int mach64_do_dma_flush(drm_mach64_private_t *dev_priv)
195 {
196         /* FIXME: It's not necessary to wait for idle when flushing
197          * we just need to ensure the ring will be completely processed
198          * in finite time without another ioctl
199          */
200         return mach64_ring_idle(dev_priv);
201 }
202
203 /**
204  * Stop all DMA activity.
205  */
206 int mach64_do_dma_idle(drm_mach64_private_t *dev_priv)
207 {
208         int ret;
209
210         /* wait for completion */
211         if ((ret = mach64_ring_idle(dev_priv)) < 0) {
212                 DRM_ERROR("failed BM_GUI_TABLE=0x%08x tail: %u\n",
213                           MACH64_READ(MACH64_BM_GUI_TABLE),
214                           dev_priv->ring.tail);
215                 return ret;
216         }
217
218         mach64_ring_stop(dev_priv);
219
220         /* clean up after pass */
221         mach64_do_release_used_buffers(dev_priv);
222         return 0;
223 }
224
225 /**
226  * Reset the engine.  This will stop the DMA if it is running.
227  */
228 int mach64_do_engine_reset(drm_mach64_private_t *dev_priv)
229 {
230         u32 tmp;
231
232         DRM_DEBUG("\n");
233
234         /* Kill off any outstanding DMA transfers.
235          */
236         tmp = MACH64_READ(MACH64_BUS_CNTL);
237         MACH64_WRITE(MACH64_BUS_CNTL, tmp | MACH64_BUS_MASTER_DIS);
238
239         /* Reset the GUI engine (high to low transition).
240          */
241         tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
242         MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp & ~MACH64_GUI_ENGINE_ENABLE);
243         /* Enable the GUI engine
244          */
245         tmp = MACH64_READ(MACH64_GEN_TEST_CNTL);
246         MACH64_WRITE(MACH64_GEN_TEST_CNTL, tmp | MACH64_GUI_ENGINE_ENABLE);
247
248         /* ensure engine is not locked up by clearing any FIFO or HOST errors
249          */
250         tmp = MACH64_READ(MACH64_BUS_CNTL);
251         MACH64_WRITE(MACH64_BUS_CNTL, tmp | 0x00a00000);
252
253         /* Once GUI engine is restored, disable bus mastering */
254         MACH64_WRITE(MACH64_SRC_CNTL, 0);
255
256         /* Reset descriptor ring */
257         mach64_ring_reset(dev_priv);
258
259         return 0;
260 }
261
262 /*@}*/
263
264
265 /*******************************************************************/
266 /** \name Debugging output */
267 /*@{*/
268
269 /**
270  * Dump engine registers values.
271  */
272 void mach64_dump_engine_info(drm_mach64_private_t *dev_priv)
273 {
274         DRM_INFO("\n");
275         if (!dev_priv->is_pci) {
276                 DRM_INFO("           AGP_BASE = 0x%08x\n",
277                          MACH64_READ(MACH64_AGP_BASE));
278                 DRM_INFO("           AGP_CNTL = 0x%08x\n",
279                          MACH64_READ(MACH64_AGP_CNTL));
280         }
281         DRM_INFO("     ALPHA_TST_CNTL = 0x%08x\n",
282                  MACH64_READ(MACH64_ALPHA_TST_CNTL));
283         DRM_INFO("\n");
284         DRM_INFO("         BM_COMMAND = 0x%08x\n",
285                  MACH64_READ(MACH64_BM_COMMAND));
286         DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
287                  MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
288         DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
289                  MACH64_READ(MACH64_BM_GUI_TABLE));
290         DRM_INFO("          BM_STATUS = 0x%08x\n",
291                  MACH64_READ(MACH64_BM_STATUS));
292         DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
293                  MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
294         DRM_INFO("    BM_SYSTEM_TABLE = 0x%08x\n",
295                  MACH64_READ(MACH64_BM_SYSTEM_TABLE));
296         DRM_INFO("           BUS_CNTL = 0x%08x\n",
297                  MACH64_READ(MACH64_BUS_CNTL));
298         DRM_INFO("\n");
299         /* DRM_INFO( "         CLOCK_CNTL = 0x%08x\n", MACH64_READ( MACH64_CLOCK_CNTL ) ); */
300         DRM_INFO("        CLR_CMP_CLR = 0x%08x\n",
301                  MACH64_READ(MACH64_CLR_CMP_CLR));
302         DRM_INFO("       CLR_CMP_CNTL = 0x%08x\n",
303                  MACH64_READ(MACH64_CLR_CMP_CNTL));
304         /* DRM_INFO( "        CLR_CMP_MSK = 0x%08x\n", MACH64_READ( MACH64_CLR_CMP_MSK ) ); */
305         DRM_INFO("     CONFIG_CHIP_ID = 0x%08x\n",
306                  MACH64_READ(MACH64_CONFIG_CHIP_ID));
307         DRM_INFO("        CONFIG_CNTL = 0x%08x\n",
308                  MACH64_READ(MACH64_CONFIG_CNTL));
309         DRM_INFO("       CONFIG_STAT0 = 0x%08x\n",
310                  MACH64_READ(MACH64_CONFIG_STAT0));
311         DRM_INFO("       CONFIG_STAT1 = 0x%08x\n",
312                  MACH64_READ(MACH64_CONFIG_STAT1));
313         DRM_INFO("       CONFIG_STAT2 = 0x%08x\n",
314                  MACH64_READ(MACH64_CONFIG_STAT2));
315         DRM_INFO("            CRC_SIG = 0x%08x\n", MACH64_READ(MACH64_CRC_SIG));
316         DRM_INFO("  CUSTOM_MACRO_CNTL = 0x%08x\n",
317                  MACH64_READ(MACH64_CUSTOM_MACRO_CNTL));
318         DRM_INFO("\n");
319         /* DRM_INFO( "           DAC_CNTL = 0x%08x\n", MACH64_READ( MACH64_DAC_CNTL ) ); */
320         /* DRM_INFO( "           DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_DAC_REGS ) ); */
321         DRM_INFO("        DP_BKGD_CLR = 0x%08x\n",
322                  MACH64_READ(MACH64_DP_BKGD_CLR));
323         DRM_INFO("        DP_FRGD_CLR = 0x%08x\n",
324                  MACH64_READ(MACH64_DP_FRGD_CLR));
325         DRM_INFO("             DP_MIX = 0x%08x\n", MACH64_READ(MACH64_DP_MIX));
326         DRM_INFO("       DP_PIX_WIDTH = 0x%08x\n",
327                  MACH64_READ(MACH64_DP_PIX_WIDTH));
328         DRM_INFO("             DP_SRC = 0x%08x\n", MACH64_READ(MACH64_DP_SRC));
329         DRM_INFO("      DP_WRITE_MASK = 0x%08x\n",
330                  MACH64_READ(MACH64_DP_WRITE_MASK));
331         DRM_INFO("         DSP_CONFIG = 0x%08x\n",
332                  MACH64_READ(MACH64_DSP_CONFIG));
333         DRM_INFO("         DSP_ON_OFF = 0x%08x\n",
334                  MACH64_READ(MACH64_DSP_ON_OFF));
335         DRM_INFO("           DST_CNTL = 0x%08x\n",
336                  MACH64_READ(MACH64_DST_CNTL));
337         DRM_INFO("      DST_OFF_PITCH = 0x%08x\n",
338                  MACH64_READ(MACH64_DST_OFF_PITCH));
339         DRM_INFO("\n");
340         /* DRM_INFO( "       EXT_DAC_REGS = 0x%08x\n", MACH64_READ( MACH64_EXT_DAC_REGS ) ); */
341         DRM_INFO("       EXT_MEM_CNTL = 0x%08x\n",
342                  MACH64_READ(MACH64_EXT_MEM_CNTL));
343         DRM_INFO("\n");
344         DRM_INFO("          FIFO_STAT = 0x%08x\n",
345                  MACH64_READ(MACH64_FIFO_STAT));
346         DRM_INFO("\n");
347         DRM_INFO("      GEN_TEST_CNTL = 0x%08x\n",
348                  MACH64_READ(MACH64_GEN_TEST_CNTL));
349         /* DRM_INFO( "              GP_IO = 0x%08x\n", MACH64_READ( MACH64_GP_IO ) ); */
350         DRM_INFO("   GUI_CMDFIFO_DATA = 0x%08x\n",
351                  MACH64_READ(MACH64_GUI_CMDFIFO_DATA));
352         DRM_INFO("  GUI_CMDFIFO_DEBUG = 0x%08x\n",
353                  MACH64_READ(MACH64_GUI_CMDFIFO_DEBUG));
354         DRM_INFO("           GUI_CNTL = 0x%08x\n",
355                  MACH64_READ(MACH64_GUI_CNTL));
356         DRM_INFO("           GUI_STAT = 0x%08x\n",
357                  MACH64_READ(MACH64_GUI_STAT));
358         DRM_INFO("      GUI_TRAJ_CNTL = 0x%08x\n",
359                  MACH64_READ(MACH64_GUI_TRAJ_CNTL));
360         DRM_INFO("\n");
361         DRM_INFO("          HOST_CNTL = 0x%08x\n",
362                  MACH64_READ(MACH64_HOST_CNTL));
363         DRM_INFO("           HW_DEBUG = 0x%08x\n",
364                  MACH64_READ(MACH64_HW_DEBUG));
365         DRM_INFO("\n");
366         DRM_INFO("    MEM_ADDR_CONFIG = 0x%08x\n",
367                  MACH64_READ(MACH64_MEM_ADDR_CONFIG));
368         DRM_INFO("       MEM_BUF_CNTL = 0x%08x\n",
369                  MACH64_READ(MACH64_MEM_BUF_CNTL));
370         DRM_INFO("\n");
371         DRM_INFO("           PAT_REG0 = 0x%08x\n",
372                  MACH64_READ(MACH64_PAT_REG0));
373         DRM_INFO("           PAT_REG1 = 0x%08x\n",
374                  MACH64_READ(MACH64_PAT_REG1));
375         DRM_INFO("\n");
376         DRM_INFO("            SC_LEFT = 0x%08x\n", MACH64_READ(MACH64_SC_LEFT));
377         DRM_INFO("           SC_RIGHT = 0x%08x\n",
378                  MACH64_READ(MACH64_SC_RIGHT));
379         DRM_INFO("             SC_TOP = 0x%08x\n", MACH64_READ(MACH64_SC_TOP));
380         DRM_INFO("          SC_BOTTOM = 0x%08x\n",
381                  MACH64_READ(MACH64_SC_BOTTOM));
382         DRM_INFO("\n");
383         DRM_INFO("      SCALE_3D_CNTL = 0x%08x\n",
384                  MACH64_READ(MACH64_SCALE_3D_CNTL));
385         DRM_INFO("       SCRATCH_REG0 = 0x%08x\n",
386                  MACH64_READ(MACH64_SCRATCH_REG0));
387         DRM_INFO("       SCRATCH_REG1 = 0x%08x\n",
388                  MACH64_READ(MACH64_SCRATCH_REG1));
389         DRM_INFO("         SETUP_CNTL = 0x%08x\n",
390                  MACH64_READ(MACH64_SETUP_CNTL));
391         DRM_INFO("           SRC_CNTL = 0x%08x\n",
392                  MACH64_READ(MACH64_SRC_CNTL));
393         DRM_INFO("\n");
394         DRM_INFO("           TEX_CNTL = 0x%08x\n",
395                  MACH64_READ(MACH64_TEX_CNTL));
396         DRM_INFO("     TEX_SIZE_PITCH = 0x%08x\n",
397                  MACH64_READ(MACH64_TEX_SIZE_PITCH));
398         DRM_INFO("       TIMER_CONFIG = 0x%08x\n",
399                  MACH64_READ(MACH64_TIMER_CONFIG));
400         DRM_INFO("\n");
401         DRM_INFO("             Z_CNTL = 0x%08x\n", MACH64_READ(MACH64_Z_CNTL));
402         DRM_INFO("        Z_OFF_PITCH = 0x%08x\n",
403                  MACH64_READ(MACH64_Z_OFF_PITCH));
404         DRM_INFO("\n");
405 }
406
407 #define MACH64_DUMP_CONTEXT     3
408
409 /**
410  * Used by mach64_dump_ring_info() to dump the contents of the current buffer
411  * pointed to by the ring head.
412  */
413 static void mach64_dump_buf_info(drm_mach64_private_t *dev_priv,
414                                  struct drm_buf *buf)
415 {
416         u32 addr = GETBUFADDR(buf);
417         u32 used = buf->used >> 2;
418         u32 sys_addr = MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR);
419         u32 *p = GETBUFPTR(buf);
420         int skipped = 0;
421
422         DRM_INFO("buffer contents:\n");
423
424         while (used) {
425                 u32 reg, count;
426
427                 reg = le32_to_cpu(*p++);
428                 if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
429                     (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
430                      addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
431                     addr >=
432                     GETBUFADDR(buf) + buf->used - MACH64_DUMP_CONTEXT * 4) {
433                         DRM_INFO("%08x:  0x%08x\n", addr, reg);
434                 }
435                 addr += 4;
436                 used--;
437
438                 count = (reg >> 16) + 1;
439                 reg = reg & 0xffff;
440                 reg = MMSELECT(reg);
441                 while (count && used) {
442                         if (addr <= GETBUFADDR(buf) + MACH64_DUMP_CONTEXT * 4 ||
443                             (addr >= sys_addr - MACH64_DUMP_CONTEXT * 4 &&
444                              addr <= sys_addr + MACH64_DUMP_CONTEXT * 4) ||
445                             addr >=
446                             GETBUFADDR(buf) + buf->used -
447                             MACH64_DUMP_CONTEXT * 4) {
448                                 DRM_INFO("%08x:    0x%04x = 0x%08x\n", addr,
449                                          reg, le32_to_cpu(*p));
450                                 skipped = 0;
451                         } else {
452                                 if (!skipped) {
453                                         DRM_INFO("  ...\n");
454                                         skipped = 1;
455                                 }
456                         }
457                         p++;
458                         addr += 4;
459                         used--;
460
461                         reg += 4;
462                         count--;
463                 }
464         }
465
466         DRM_INFO("\n");
467 }
468
469 /**
470  * Dump the ring state and contents, including the contents of the buffer being
471  * processed by the graphics engine.
472  */
473 void mach64_dump_ring_info(drm_mach64_private_t *dev_priv)
474 {
475         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
476         int i, skipped;
477
478         DRM_INFO("\n");
479
480         DRM_INFO("ring contents:\n");
481         DRM_INFO("  head_addr: 0x%08x head: %u tail: %u\n\n",
482                  ring->head_addr, ring->head, ring->tail);
483
484         skipped = 0;
485         for (i = 0; i < ring->size / sizeof(u32); i += 4) {
486                 if (i <= MACH64_DUMP_CONTEXT * 4 ||
487                     i >= ring->size / sizeof(u32) - MACH64_DUMP_CONTEXT * 4 ||
488                     (i >= ring->tail - MACH64_DUMP_CONTEXT * 4 &&
489                      i <= ring->tail + MACH64_DUMP_CONTEXT * 4) ||
490                     (i >= ring->head - MACH64_DUMP_CONTEXT * 4 &&
491                      i <= ring->head + MACH64_DUMP_CONTEXT * 4)) {
492                         DRM_INFO("  0x%08x:  0x%08x 0x%08x 0x%08x 0x%08x%s%s\n",
493                                  (u32)(ring->start_addr + i * sizeof(u32)),
494                                  le32_to_cpu(((u32 *) ring->start)[i + 0]),
495                                  le32_to_cpu(((u32 *) ring->start)[i + 1]),
496                                  le32_to_cpu(((u32 *) ring->start)[i + 2]),
497                                  le32_to_cpu(((u32 *) ring->start)[i + 3]),
498                                  i == ring->head ? " (head)" : "",
499                                  i == ring->tail ? " (tail)" : "");
500                         skipped = 0;
501                 } else {
502                         if (!skipped) {
503                                 DRM_INFO("  ...\n");
504                                 skipped = 1;
505                         }
506                 }
507         }
508
509         DRM_INFO("\n");
510
511         if (ring->head < ring->size / sizeof(u32)) {
512                 struct list_head *ptr;
513                 u32 addr = le32_to_cpu(((u32 *) ring->start)[ring->head + 1]);
514
515                 list_for_each(ptr, &dev_priv->pending) {
516                         drm_mach64_freelist_t *entry =
517                             list_entry(ptr, drm_mach64_freelist_t, list);
518                         struct drm_buf *buf = entry->buf;
519
520                         u32 buf_addr = GETBUFADDR(buf);
521
522                         if (buf_addr <= addr && addr < buf_addr + buf->used)
523                                 mach64_dump_buf_info(dev_priv, buf);
524                 }
525         }
526
527         DRM_INFO("\n");
528         DRM_INFO("       BM_GUI_TABLE = 0x%08x\n",
529                  MACH64_READ(MACH64_BM_GUI_TABLE));
530         DRM_INFO("\n");
531         DRM_INFO("BM_FRAME_BUF_OFFSET = 0x%08x\n",
532                  MACH64_READ(MACH64_BM_FRAME_BUF_OFFSET));
533         DRM_INFO(" BM_SYSTEM_MEM_ADDR = 0x%08x\n",
534                  MACH64_READ(MACH64_BM_SYSTEM_MEM_ADDR));
535         DRM_INFO("         BM_COMMAND = 0x%08x\n",
536                  MACH64_READ(MACH64_BM_COMMAND));
537         DRM_INFO("\n");
538         DRM_INFO("          BM_STATUS = 0x%08x\n",
539                  MACH64_READ(MACH64_BM_STATUS));
540         DRM_INFO("           BUS_CNTL = 0x%08x\n",
541                  MACH64_READ(MACH64_BUS_CNTL));
542         DRM_INFO("          FIFO_STAT = 0x%08x\n",
543                  MACH64_READ(MACH64_FIFO_STAT));
544         DRM_INFO("           GUI_STAT = 0x%08x\n",
545                  MACH64_READ(MACH64_GUI_STAT));
546         DRM_INFO("           SRC_CNTL = 0x%08x\n",
547                  MACH64_READ(MACH64_SRC_CNTL));
548 }
549
550 /*@}*/
551
552
553 /*******************************************************************/
554 /** \name DMA descriptor ring macros */
555 /*@{*/
556
557 /**
558  * Add the end mark to the ring's new tail position.
559  *
560  * The bus master engine will keep processing the DMA buffers listed in the ring
561  * until it finds this mark, at which point it stops.
562  *
563  * \sa mach64_clear_dma_eol
564  */ 
565 static __inline__ void mach64_set_dma_eol(volatile u32 *addr)
566 {
567 #if defined(__i386__)
568         int nr = 31;
569
570         /* Taken from include/asm-i386/bitops.h linux header */
571         __asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr)
572                              :"Ir"(nr));
573 #elif defined(__powerpc__)
574         u32 old;
575         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
576
577         /* Taken from the include/asm-ppc/bitops.h linux header */
578         __asm__ __volatile__("\n\
579 1:      lwarx   %0,0,%3 \n\
580         or      %0,%0,%2 \n\
581         stwcx.  %0,0,%3 \n\
582         bne-    1b":"=&r"(old), "=m"(*addr)
583                              :"r"(mask), "r"(addr), "m"(*addr)
584                              :"cc");
585 #elif defined(__alpha__)
586         u32 temp;
587         u32 mask = MACH64_DMA_EOL;
588
589         /* Taken from the include/asm-alpha/bitops.h linux header */
590         __asm__ __volatile__("1:        ldl_l %0,%3\n"
591                              "  bis %0,%2,%0\n"
592                              "  stl_c %0,%1\n"
593                              "  beq %0,2f\n"
594                              ".subsection 2\n"
595                              "2:        br 1b\n"
596                              ".previous":"=&r"(temp), "=m"(*addr)
597                              :"Ir"(mask), "m"(*addr));
598 #else
599         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
600
601         *addr |= mask;
602 #endif
603 }
604
605 /**
606  * Remove the end mark from the ring's old tail position.
607  *
608  * It should be called after calling mach64_set_dma_eol to mark the ring's new
609  * tail position.
610  *
611  * We update the end marks while the bus master engine is in operation. Since
612  * the bus master engine may potentially be reading from the same position
613  * that we are writing to, the update must be done atomically to avoid the
614  * engine seeing intermediate bad data.
615  */
616 static __inline__ void mach64_clear_dma_eol(volatile u32 *addr)
617 {
618 #if defined(__i386__)
619         int nr = 31;
620
621         /* Taken from include/asm-i386/bitops.h linux header */
622         __asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
623                              :"Ir"(nr));
624 #elif defined(__powerpc__)
625         u32 old;
626         u32 mask = cpu_to_le32(MACH64_DMA_EOL);
627
628         /* Taken from the include/asm-ppc/bitops.h linux header */
629         __asm__ __volatile__("\n\
630 1:      lwarx   %0,0,%3 \n\
631         andc    %0,%0,%2 \n\
632         stwcx.  %0,0,%3 \n\
633         bne-    1b":"=&r"(old), "=m"(*addr)
634                              :"r"(mask), "r"(addr), "m"(*addr)
635                              :"cc");
636 #elif defined(__alpha__)
637         u32 temp;
638         u32 mask = ~MACH64_DMA_EOL;
639
640         /* Taken from the include/asm-alpha/bitops.h linux header */
641         __asm__ __volatile__("1:        ldl_l %0,%3\n"
642                              "  and %0,%2,%0\n"
643                              "  stl_c %0,%1\n"
644                              "  beq %0,2f\n"
645                              ".subsection 2\n"
646                              "2:        br 1b\n"
647                              ".previous":"=&r"(temp), "=m"(*addr)
648                              :"Ir"(mask), "m"(*addr));
649 #else
650         u32 mask = cpu_to_le32(~MACH64_DMA_EOL);
651
652         *addr &= mask;
653 #endif
654 }
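/*
 * Illustrative sketch, not part of the original driver, of the handshake
 * described above: terminate the descriptor at the ring's new tail first,
 * and only then clear the end-of-list bit at the old tail, so the engine
 * never runs past valid descriptors.  ADVANCE_RING() below performs the
 * clearing half of this sequence after new descriptors have been written.
 */
static __inline__ void mach64_example_move_eol(drm_mach64_descriptor_ring_t *ring,
                                               int old_tail, int new_tail)
{
        volatile u32 *entries = (volatile u32 *) ring->start;

        /* The DMA command dword of a descriptor sits two entries before its
         * tail position (see ADVANCE_RING() below).
         */
        mach64_set_dma_eol(&entries[(new_tail - 2) & ring->tail_mask]);
        DRM_MEMORYBARRIER();
        mach64_clear_dma_eol(&entries[(old_tail - 2) & ring->tail_mask]);
}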
655
656 #define RING_LOCALS                                                     \
657         int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring
658
659 #define RING_WRITE_OFS  _ring_write
660
661 #define BEGIN_RING(n)                                                   \
662         do {                                                            \
663                 if (MACH64_VERBOSE) {                                   \
664                         DRM_INFO( "BEGIN_RING( %d ) \n",                \
665                                   (n) );                                \
666                 }                                                       \
667                 if (dev_priv->ring.space <= (n) * sizeof(u32)) {        \
668                         int ret;                                        \
669                         if ((ret = mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \
670                                 DRM_ERROR( "wait_ring failed, resetting engine\n"); \
671                                 mach64_dump_engine_info( dev_priv );    \
672                                 mach64_do_engine_reset( dev_priv );     \
673                                 return ret;                             \
674                         }                                               \
675                 }                                                       \
676                 dev_priv->ring.space -= (n) * sizeof(u32);              \
677                 _ring = (u32 *) dev_priv->ring.start;                   \
678                 _ring_tail = _ring_write = dev_priv->ring.tail;         \
679                 _ring_mask = dev_priv->ring.tail_mask;                  \
680         } while (0)
681
682 #define OUT_RING( x )                                           \
683 do {                                                            \
684         if (MACH64_VERBOSE) {                                   \
685                 DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",    \
686                            (unsigned int)(x), _ring_write );    \
687         }                                                       \
688         _ring[_ring_write++] = cpu_to_le32( x );                \
689         _ring_write &= _ring_mask;                              \
690 } while (0)
691
692 #define ADVANCE_RING()                                                  \
693 do {                                                                    \
694         if (MACH64_VERBOSE) {                                           \
695                 DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",     \
696                           _ring_write, _ring_tail );                    \
697         }                                                               \
698         DRM_MEMORYBARRIER();                                            \
699         mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] );  \
700         DRM_MEMORYBARRIER();                                            \
701         dev_priv->ring.tail = _ring_write;                              \
702         mach64_ring_tick( dev_priv, &(dev_priv)->ring );                \
703 } while (0)
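/*
 * Illustrative sketch, not part of the original driver: minimal use of the
 * ring macros above to emit one GUI-master descriptor.  The bus address and
 * byte count are placeholders; mach64_add_buf_to_ring() below shows the real
 * per-buffer pattern.
 */
static __inline__ int mach64_example_emit_descriptor(drm_mach64_private_t *dev_priv,
                                                     u32 bus_addr, u32 bytes)
{
        RING_LOCALS;

        BEGIN_RING(4);
        OUT_RING(MACH64_APERTURE_OFFSET + MACH64_BM_ADDR);     /* destination */
        OUT_RING(bus_addr);                                     /* source address */
        OUT_RING(bytes | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
        OUT_RING(0);                                            /* reserved */
        ADVANCE_RING();

        return 0;
}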
704
705 /**
706  * Queue a DMA buffer of register writes into the ring buffer.
707  */ 
708 int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
709                            drm_mach64_freelist_t *entry)
710 {
711         int bytes, pages, remainder;
712         u32 address, page;
713         int i;
714         struct drm_buf *buf = entry->buf;
715         RING_LOCALS;
716
717         bytes = buf->used;
718         address = GETBUFADDR( buf );
719         pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
720
721         BEGIN_RING( pages * 4 );
722
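        /* Each descriptor is four dwords: the GUI-register destination in the
         * card's aperture, the system-memory source address of this chunk,
         * the byte count with HOLD_OFFSET/EOL flags, and a reserved dword.
         */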
723         for ( i = 0 ; i < pages-1 ; i++ ) {
724                 page = address + i * MACH64_DMA_CHUNKSIZE;
725                 OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
726                 OUT_RING( page );
727                 OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
728                 OUT_RING( 0 );
729         }
730
731         /* generate the final descriptor for any remaining commands in this buffer */
732         page = address + i * MACH64_DMA_CHUNKSIZE;
733         remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
734
735         /* Save dword offset of last descriptor for this buffer.
736          * This is needed to check for completion of the buffer in freelist_get
737          */
738         entry->ring_ofs = RING_WRITE_OFS;
739
740         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
741         OUT_RING( page );
742         OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
743         OUT_RING( 0 );
744
745         ADVANCE_RING();
746         
747         return 0;
748 }
749
750 /**
751  * Queue a DMA buffer controlling host data transfers (e.g., blit).
752  * 
753  * Almost identical to mach64_add_buf_to_ring.
754  */
755 int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
756                                     drm_mach64_freelist_t *entry)
757 {
758         int bytes, pages, remainder;
759         u32 address, page;
760         int i;
761         struct drm_buf *buf = entry->buf;
762         RING_LOCALS;
763         
764         bytes = buf->used - MACH64_HOSTDATA_BLIT_OFFSET;
765         pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
766         address = GETBUFADDR( buf );
767         
768         BEGIN_RING( 4 + pages * 4 );
769         
770         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
771         OUT_RING( address );
772         OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET );
773         OUT_RING( 0 );
774         address += MACH64_HOSTDATA_BLIT_OFFSET;
775         
776         for ( i = 0 ; i < pages-1 ; i++ ) {
777                 page = address + i * MACH64_DMA_CHUNKSIZE;
778                 OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
779                 OUT_RING( page );
780                 OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
781                 OUT_RING( 0 );
782         }
783         
784         /* generate the final descriptor for any remaining commands in this buffer */
785         page = address + i * MACH64_DMA_CHUNKSIZE;
786         remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
787         
788         /* Save dword offset of last descriptor for this buffer.
789          * This is needed to check for completion of the buffer in freelist_get
790          */
791         entry->ring_ofs = RING_WRITE_OFS;
792         
793         OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
794         OUT_RING( page );
795         OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
796         OUT_RING( 0 );
797         
798         ADVANCE_RING();
799         
800         return 0;
801 }
802
803 /*@}*/
804
805
806 /*******************************************************************/
807 /** \name DMA test and initialization */
808 /*@{*/
809
810 /**
811  * Perform a simple DMA operation using the pattern registers to test whether
812  * DMA works.
813  *
814  * \return zero if successful.
815  *
816  * \note This function was the testbed for many experiments regarding Mach64
817  * DMA operation. It is left here since it is so tricky to get DMA operating
818  * properly on some architectures and hardware.
819  */
820 static int mach64_bm_dma_test(struct drm_device * dev)
821 {
822         drm_mach64_private_t *dev_priv = dev->dev_private;
823         drm_dma_handle_t *cpu_addr_dmah;
824         u32 data_addr;
825         u32 *table, *data;
826         u32 expected[2];
827         u32 src_cntl, pat_reg0, pat_reg1;
828         int i, count, failed;
829
830         DRM_DEBUG("\n");
831
832         table = (u32 *) dev_priv->ring.start;
833
834         /* FIXME: get a dma buffer from the freelist here */
835         DRM_DEBUG("Allocating data memory ...\n");
836 #if defined(__FreeBSD__) || defined(__DragonFly__)
837         DRM_UNLOCK(dev);
838 #endif
839         cpu_addr_dmah =
840             drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful);
841 #if defined(__FreeBSD__) || defined(__DragonFly__)
842         DRM_LOCK(dev);
843 #endif
844         if (!cpu_addr_dmah) {
845                 DRM_INFO("data-memory allocation failed!\n");
846                 return -ENOMEM;
847         } else {
848                 data = (u32 *) cpu_addr_dmah->vaddr;
849                 data_addr = (u32) cpu_addr_dmah->busaddr;
850         }
851
852         /* Save the X server's value for SRC_CNTL and restore it
853          * in case our test fails.  This prevents the X server
854          * from disabling its cache for this register
855          */
856         src_cntl = MACH64_READ(MACH64_SRC_CNTL);
857         pat_reg0 = MACH64_READ(MACH64_PAT_REG0);
858         pat_reg1 = MACH64_READ(MACH64_PAT_REG1);
859
860         mach64_do_wait_for_fifo(dev_priv, 3);
861
862         MACH64_WRITE(MACH64_SRC_CNTL, 0);
863         MACH64_WRITE(MACH64_PAT_REG0, 0x11111111);
864         MACH64_WRITE(MACH64_PAT_REG1, 0x11111111);
865
866         mach64_do_wait_for_idle(dev_priv);
867
868         for (i = 0; i < 2; i++) {
869                 u32 reg;
870                 reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
871                 DRM_DEBUG("(Before DMA Transfer) reg %d = 0x%08x\n", i, reg);
872                 if (reg != 0x11111111) {
873                         DRM_INFO("Error initializing test registers\n");
874                         DRM_INFO("resetting engine ...\n");
875                         mach64_do_engine_reset(dev_priv);
876                         DRM_INFO("freeing data buffer memory.\n");
877                         drm_pci_free(dev, cpu_addr_dmah);
878                         return -EIO;
879                 }
880         }
881
882         /* fill up a buffer with sets of 2 consecutive writes starting with PAT_REG0 */
883         count = 0;
884
885         data[count++] = cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
886         data[count++] = expected[0] = 0x22222222;
887         data[count++] = expected[1] = 0xaaaaaaaa;
888
889         while (count < 1020) {
890                 data[count++] =
891                     cpu_to_le32(DMAREG(MACH64_PAT_REG0) | (1 << 16));
892                 data[count++] = 0x22222222;
893                 data[count++] = 0xaaaaaaaa;
894         }
895         data[count++] = cpu_to_le32(DMAREG(MACH64_SRC_CNTL) | (0 << 16));
896         data[count++] = 0;
897
898         DRM_DEBUG("Preparing table ...\n");
899         table[MACH64_DMA_FRAME_BUF_OFFSET] = cpu_to_le32(MACH64_BM_ADDR +
900                                                          MACH64_APERTURE_OFFSET);
901         table[MACH64_DMA_SYS_MEM_ADDR] = cpu_to_le32(data_addr);
902         table[MACH64_DMA_COMMAND] = cpu_to_le32(count * sizeof(u32)
903                                                 | MACH64_DMA_HOLD_OFFSET
904                                                 | MACH64_DMA_EOL);
905         table[MACH64_DMA_RESERVED] = 0;
906
907         DRM_DEBUG("table[0] = 0x%08x\n", table[0]);
908         DRM_DEBUG("table[1] = 0x%08x\n", table[1]);
909         DRM_DEBUG("table[2] = 0x%08x\n", table[2]);
910         DRM_DEBUG("table[3] = 0x%08x\n", table[3]);
911
912         for (i = 0; i < 6; i++) {
913                 DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
914         }
915         DRM_DEBUG(" ...\n");
916         for (i = count - 5; i < count; i++) {
917                 DRM_DEBUG(" data[%d] = 0x%08x\n", i, data[i]);
918         }
919
920         DRM_MEMORYBARRIER();
921
922         DRM_DEBUG("waiting for idle...\n");
923         if ((i = mach64_do_wait_for_idle(dev_priv))) {
924                 DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
925                 DRM_INFO("resetting engine ...\n");
926                 mach64_do_engine_reset(dev_priv);
927                 mach64_do_wait_for_fifo(dev_priv, 3);
928                 MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
929                 MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
930                 MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
931                 DRM_INFO("freeing data buffer memory.\n");
932                 drm_pci_free(dev, cpu_addr_dmah);
933                 return i;
934         }
935         DRM_DEBUG("waiting for idle...done\n");
936
937         DRM_DEBUG("BUS_CNTL = 0x%08x\n", MACH64_READ(MACH64_BUS_CNTL));
938         DRM_DEBUG("SRC_CNTL = 0x%08x\n", MACH64_READ(MACH64_SRC_CNTL));
939         DRM_DEBUG("\n");
940         DRM_DEBUG("data bus addr = 0x%08x\n", data_addr);
941         DRM_DEBUG("table bus addr = 0x%08x\n", dev_priv->ring.start_addr);
942
943         DRM_DEBUG("starting DMA transfer...\n");
944         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
945                      dev_priv->ring.start_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);
946
947         MACH64_WRITE(MACH64_SRC_CNTL,
948                      MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
949                      MACH64_SRC_BM_OP_SYSTEM_TO_REG);
950
951         /* Kick off the transfer */
952         DRM_DEBUG("starting DMA transfer... done.\n");
953         MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);
954
955         DRM_DEBUG("waiting for idle...\n");
956
957         if ((i = mach64_do_wait_for_idle(dev_priv))) {
958                 /* engine locked up, dump register state and reset */
959                 DRM_INFO("mach64_do_wait_for_idle failed (result=%d)\n", i);
960                 mach64_dump_engine_info(dev_priv);
961                 DRM_INFO("resetting engine ...\n");
962                 mach64_do_engine_reset(dev_priv);
963                 mach64_do_wait_for_fifo(dev_priv, 3);
964                 MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
965                 MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
966                 MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
967                 DRM_INFO("freeing data buffer memory.\n");
968                 drm_pci_free(dev, cpu_addr_dmah);
969                 return i;
970         }
971
972         DRM_DEBUG("waiting for idle...done\n");
973
974         /* restore SRC_CNTL */
975         mach64_do_wait_for_fifo(dev_priv, 1);
976         MACH64_WRITE(MACH64_SRC_CNTL, src_cntl);
977
978         failed = 0;
979
980         /* Check register values to see if the GUI master operation succeeded */
981         for (i = 0; i < 2; i++) {
982                 u32 reg;
983                 reg = MACH64_READ((MACH64_PAT_REG0 + i * 4));
984                 DRM_DEBUG("(After DMA Transfer) reg %d = 0x%08x\n", i, reg);
985                 if (reg != expected[i]) {
986                         failed = -1;
987                 }
988         }
989
990         /* restore pattern registers */
991         mach64_do_wait_for_fifo(dev_priv, 2);
992         MACH64_WRITE(MACH64_PAT_REG0, pat_reg0);
993         MACH64_WRITE(MACH64_PAT_REG1, pat_reg1);
994
995         DRM_DEBUG("freeing data buffer memory.\n");
996         drm_pci_free(dev, cpu_addr_dmah);
997         DRM_DEBUG("returning ...\n");
998
999         return failed;
1000 }
1001
1002 /**
1003  * Called during the DMA initialization ioctl to initialize all the necessary
1004  * software and hardware state for DMA operation.
1005  */
1006 static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)
1007 {
1008         drm_mach64_private_t *dev_priv;
1009         u32 tmp;
1010         int i, ret;
1011
1012         DRM_DEBUG("\n");
1013
1014         dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER);
1015         if (dev_priv == NULL)
1016                 return -ENOMEM;
1017
1018         memset(dev_priv, 0, sizeof(drm_mach64_private_t));
1019
1020         dev_priv->is_pci = init->is_pci;
1021
1022         dev_priv->fb_bpp = init->fb_bpp;
1023         dev_priv->front_offset = init->front_offset;
1024         dev_priv->front_pitch = init->front_pitch;
1025         dev_priv->back_offset = init->back_offset;
1026         dev_priv->back_pitch = init->back_pitch;
1027
1028         dev_priv->depth_bpp = init->depth_bpp;
1029         dev_priv->depth_offset = init->depth_offset;
1030         dev_priv->depth_pitch = init->depth_pitch;
1031
1032         dev_priv->front_offset_pitch = (((dev_priv->front_pitch / 8) << 22) |
1033                                         (dev_priv->front_offset >> 3));
1034         dev_priv->back_offset_pitch = (((dev_priv->back_pitch / 8) << 22) |
1035                                        (dev_priv->back_offset >> 3));
1036         dev_priv->depth_offset_pitch = (((dev_priv->depth_pitch / 8) << 22) |
1037                                         (dev_priv->depth_offset >> 3));
1038
1039         dev_priv->usec_timeout = 1000000;
1040
1041         /* Set up the freelist, placeholder list and pending list */
1042         INIT_LIST_HEAD(&dev_priv->free_list);
1043         INIT_LIST_HEAD(&dev_priv->placeholders);
1044         INIT_LIST_HEAD(&dev_priv->pending);
1045
1046         dev_priv->sarea = drm_getsarea(dev);
1047         if (!dev_priv->sarea) {
1048                 DRM_ERROR("can not find sarea!\n");
1049                 dev->dev_private = (void *)dev_priv;
1050                 mach64_do_cleanup_dma(dev);
1051                 return -EINVAL;
1052         }
1053         dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
1054         if (!dev_priv->fb) {
1055                 DRM_ERROR("can not find frame buffer map!\n");
1056                 dev->dev_private = (void *)dev_priv;
1057                 mach64_do_cleanup_dma(dev);
1058                 return -EINVAL;
1059         }
1060         dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
1061         if (!dev_priv->mmio) {
1062                 DRM_ERROR("can not find mmio map!\n");
1063                 dev->dev_private = (void *)dev_priv;
1064                 mach64_do_cleanup_dma(dev);
1065                 return -EINVAL;
1066         }
1067
1068         dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
1069         if (!dev_priv->ring_map) {
1070                 DRM_ERROR("can not find ring map!\n");
1071                 dev->dev_private = (void *)dev_priv;
1072                 mach64_do_cleanup_dma(dev);
1073                 return -EINVAL;
1074         }
1075
1076         dev_priv->sarea_priv = (drm_mach64_sarea_t *)
1077             ((u8 *) dev_priv->sarea->virtual + init->sarea_priv_offset);
1078
1079         if (!dev_priv->is_pci) {
1080                 drm_core_ioremap(dev_priv->ring_map, dev);
1081                 if (!dev_priv->ring_map->virtual) {
1082                         DRM_ERROR("can not ioremap virtual address for"
1083                                   " descriptor ring\n");
1084                         dev->dev_private = (void *)dev_priv;
1085                         mach64_do_cleanup_dma(dev);
1086                         return -ENOMEM;
1087                 }
1088                 dev->agp_buffer_token = init->buffers_offset;
1089                 dev->agp_buffer_map =
1090                     drm_core_findmap(dev, init->buffers_offset);
1091                 if (!dev->agp_buffer_map) {
1092                         DRM_ERROR("can not find dma buffer map!\n");
1093                         dev->dev_private = (void *)dev_priv;
1094                         mach64_do_cleanup_dma(dev);
1095                         return -EINVAL;
1096                 }
1097                 /* there might be a nicer way to do this -
1098                    dev isn't passed all the way through the mach64 - DA */
1099                 dev_priv->dev_buffers = dev->agp_buffer_map;
1100
1101                 drm_core_ioremap(dev->agp_buffer_map, dev);
1102                 if (!dev->agp_buffer_map->virtual) {
1103                         DRM_ERROR("can not ioremap virtual address for"
1104                                   " dma buffer\n");
1105                         dev->dev_private = (void *)dev_priv;
1106                         mach64_do_cleanup_dma(dev);
1107                         return -ENOMEM;
1108                 }
1109                 dev_priv->agp_textures =
1110                     drm_core_findmap(dev, init->agp_textures_offset);
1111                 if (!dev_priv->agp_textures) {
1112                         DRM_ERROR("can not find agp texture region!\n");
1113                         dev->dev_private = (void *)dev_priv;
1114                         mach64_do_cleanup_dma(dev);
1115                         return -EINVAL;
1116                 }
1117         }
1118
1119         dev->dev_private = (void *)dev_priv;
1120
1121         dev_priv->driver_mode = init->dma_mode;
1122
1123         /* changing the FIFO size from the default causes problems with DMA */
1124         tmp = MACH64_READ(MACH64_GUI_CNTL);
1125         if ((tmp & MACH64_CMDFIFO_SIZE_MASK) != MACH64_CMDFIFO_SIZE_128) {
1126                 DRM_INFO("Setting FIFO size to 128 entries\n");
1127                 /* FIFO must be empty to change the FIFO depth */
1128                 if ((ret = mach64_do_wait_for_idle(dev_priv))) {
1129                         DRM_ERROR
1130                             ("wait for idle failed before changing FIFO depth!\n");
1131                         mach64_do_cleanup_dma(dev);
1132                         return ret;
1133                 }
1134                 MACH64_WRITE(MACH64_GUI_CNTL, ((tmp & ~MACH64_CMDFIFO_SIZE_MASK)
1135                                                | MACH64_CMDFIFO_SIZE_128));
1136                 /* need to read GUI_STAT for proper sync according to docs */
1137                 if ((ret = mach64_do_wait_for_idle(dev_priv))) {
1138                         DRM_ERROR
1139                             ("wait for idle failed when changing FIFO depth!\n");
1140                         mach64_do_cleanup_dma(dev);
1141                         return ret;
1142                 }
1143         }
1144
1145         dev_priv->ring.size = 0x4000;   /* 16KB */
1146         dev_priv->ring.start = dev_priv->ring_map->virtual;
1147         dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset;
1148
1149         memset(dev_priv->ring.start, 0, dev_priv->ring.size);
1150         DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n",
1151                  dev_priv->ring.start, dev_priv->ring.start_addr);
1152
1153         ret = 0;
1154         if (dev_priv->driver_mode != MACH64_MODE_MMIO) {
1155
1156                 /* enable block 1 registers and bus mastering */
1157                 MACH64_WRITE(MACH64_BUS_CNTL, ((MACH64_READ(MACH64_BUS_CNTL)
1158                                                 | MACH64_BUS_EXT_REG_EN)
1159                                                & ~MACH64_BUS_MASTER_DIS));
1160
1161                 /* try a DMA GUI-mastering pass and fall back to MMIO if it fails */
1162                 DRM_DEBUG("Starting DMA test...\n");
1163                 if ((ret = mach64_bm_dma_test(dev))) {
1164                         dev_priv->driver_mode = MACH64_MODE_MMIO;
1165                 }
1166         }
1167
1168         switch (dev_priv->driver_mode) {
1169         case MACH64_MODE_MMIO:
1170                 MACH64_WRITE(MACH64_BUS_CNTL, (MACH64_READ(MACH64_BUS_CNTL)
1171                                                | MACH64_BUS_EXT_REG_EN
1172                                                | MACH64_BUS_MASTER_DIS));
1173                 if (init->dma_mode == MACH64_MODE_MMIO)
1174                         DRM_INFO("Forcing pseudo-DMA mode\n");
1175                 else
1176                         DRM_INFO
1177                             ("DMA test failed (ret=%d), using pseudo-DMA mode\n",
1178                              ret);
1179                 break;
1180         case MACH64_MODE_DMA_SYNC:
1181                 DRM_INFO("DMA test succeeded, using synchronous DMA mode\n");
1182                 break;
1183         case MACH64_MODE_DMA_ASYNC:
1184         default:
1185                 DRM_INFO("DMA test succeeded, using asynchronous DMA mode\n");
1186         }
1187
1188         dev_priv->ring_running = 0;
1189
1190         /* setup offsets for physical address of table start and end */
1191         dev_priv->ring.head_addr = dev_priv->ring.start_addr;
1192         dev_priv->ring.head = dev_priv->ring.tail = 0;
1193         dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
1194         dev_priv->ring.space = dev_priv->ring.size;
1195
1196         /* setup physical address and size of descriptor table */
1197         mach64_do_wait_for_fifo(dev_priv, 1);
1198         MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
1199                      (dev_priv->ring.
1200                       head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB));
1201
1202         /* init frame counter */
1203         dev_priv->sarea_priv->frames_queued = 0;
1204         for (i = 0; i < MACH64_MAX_QUEUED_FRAMES; i++) {
1205                 dev_priv->frame_ofs[i] = ~0;    /* All ones indicates placeholder */
1206         }
1207
1208         /* Allocate the DMA buffer freelist */
1209         if ((ret = mach64_init_freelist(dev))) {
1210                 DRM_ERROR("Freelist allocation failed\n");
1211                 mach64_do_cleanup_dma(dev);
1212                 return ret;
1213         }
1214
1215         return 0;
1216 }
1217
1218 /*******************************************************************/
1219 /** MMIO Pseudo-DMA (intended primarily for debugging, not performance)
1220  */
1221
1222 int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t *dev_priv)
1223 {
1224         drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
1225         volatile u32 *ring_read;
1226         struct list_head *ptr;
1227         drm_mach64_freelist_t *entry;
1228         struct drm_buf *buf = NULL;
1229         u32 *buf_ptr;
1230         u32 used, reg, target;
1231         int fifo, count, found, ret, no_idle_wait;
1232
1233         fifo = count = reg = no_idle_wait = 0;
1234         target = MACH64_BM_ADDR;
1235
1236         if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
1237                 DRM_INFO("idle failed before pseudo-dma dispatch, resetting engine\n");
1238                 mach64_dump_engine_info(dev_priv);
1239                 mach64_do_engine_reset(dev_priv);
1240                 return ret;
1241         }
1242
1243         ring_read = (u32 *) ring->start;
1244
1245         while (ring->tail != ring->head) {
1246                 u32 buf_addr, new_target, offset;
1247                 u32 bytes, remaining, head, eol;
1248
1249                 head = ring->head;
1250
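                /* Parse the next 4-dword descriptor: destination aperture
                 * offset, source bus address, byte count with HOLD_OFFSET/EOL
                 * flags, and a reserved dword that is skipped.
                 */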
1251                 new_target =
1252                     le32_to_cpu(ring_read[head++]) - MACH64_APERTURE_OFFSET;
1253                 buf_addr = le32_to_cpu(ring_read[head++]);
1254                 eol = le32_to_cpu(ring_read[head]) & MACH64_DMA_EOL;
1255                 bytes = le32_to_cpu(ring_read[head++])
1256                     & ~(MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL);
1257                 head++;
1258                 head &= ring->tail_mask;
1259
1260                 /* can't wait for idle between a blit setup descriptor
1261                  * and a HOSTDATA descriptor or the engine will lock
1262                  */
1263                 if (new_target == MACH64_BM_HOSTDATA
1264                     && target == MACH64_BM_ADDR)
1265                         no_idle_wait = 1;
1266
1267                 target = new_target;
1268
1269                 found = 0;
1270                 offset = 0;
1271                 list_for_each(ptr, &dev_priv->pending) {
1272                         entry = list_entry(ptr, drm_mach64_freelist_t, list);
1273                         buf = entry->buf;
1274                         offset = buf_addr - GETBUFADDR(buf);
1275                         if (offset < MACH64_BUFFER_SIZE) {
1276                                 found = 1;
1277                                 break;
1278                         }
1279                 }
1280
1281                 if (!found || buf == NULL) {
1282                         DRM_ERROR
1283                             ("Couldn't find pending buffer: head: %u tail: %u buf_addr: 0x%08x %s\n",
1284                              head, ring->tail, buf_addr, (eol ? "eol" : ""));
1285                         mach64_dump_ring_info(dev_priv);
1286                         mach64_do_engine_reset(dev_priv);
1287                         return -EINVAL;
1288                 }
1289
1290                 /* Hand feed the buffer to the card via MMIO, waiting for the fifo
1291                  * every 16 writes
1292                  */
1293                 DRM_DEBUG("target: (0x%08x) %s\n", target,
1294                           (target ==
1295                            MACH64_BM_HOSTDATA ? "BM_HOSTDATA" : "BM_ADDR"));
1296                 DRM_DEBUG("offset: %u bytes: %u used: %u\n", offset, bytes,
1297                           buf->used);
1298
1299                 remaining = (buf->used - offset) >> 2;  /* dwords remaining in buffer */
1300                 used = bytes >> 2;      /* dwords in buffer for this descriptor */
1301                 buf_ptr = (u32 *) ((char *)GETBUFPTR(buf) + offset);
1302
1303                 while (used) {
1304
			if (count == 0) {
				if (target == MACH64_BM_HOSTDATA) {
					reg = DMAREG(MACH64_HOST_DATA0);
					count =
					    (remaining > 16) ? 16 : remaining;
					fifo = 0;
				} else {
					reg = le32_to_cpu(*buf_ptr++);
					used--;
					count = (reg >> 16) + 1;
				}

				reg = reg & 0xffff;
				reg = MMSELECT(reg);
			}
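			/* Before each burst of 16 MMIO writes, wait for room:
			 * a full idle wait normally, or just 16 free FIFO
			 * slots when idling is unsafe (a HOSTDATA descriptor
			 * following its blit setup).
			 */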
			while (count && used) {
				if (!fifo) {
					if (no_idle_wait) {
						if ((ret =
						     mach64_do_wait_for_fifo
						     (dev_priv, 16)) < 0) {
							no_idle_wait = 0;
							return ret;
						}
					} else {
						if ((ret =
						     mach64_do_wait_for_idle
						     (dev_priv)) < 0) {
							return ret;
						}
					}
					fifo = 16;
				}
				--fifo;
				MACH64_WRITE(reg, le32_to_cpu(*buf_ptr++));
				used--;
				remaining--;

				reg += 4;
				count--;
			}
		}
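		/* Retire this descriptor: advance the ring head past its
		 * four dwords and give the space back to the ring.
		 */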
		ring->head = head;
		ring->head_addr = ring->start_addr + (ring->head * sizeof(u32));
		ring->space += (4 * sizeof(u32));
	}

	if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) {
		return ret;
	}
	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,
		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB);

	DRM_DEBUG("completed\n");
	return 0;
}

/*@}*/


/*******************************************************************/
/** \name DMA cleanup */
/*@{*/

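/**
 * Tear down the DMA state: uninstall the interrupt handler, unmap the
 * descriptor ring and buffer apertures on AGP cards, destroy the buffer
 * freelist and free the driver-private data.
 */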
int mach64_do_cleanup_dma(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here, because the uninstall ioctl
	 * may not have been called from userspace; once dev_private is freed
	 * it is too late to do so.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_mach64_private_t *dev_priv = dev->dev_private;

		if (!dev_priv->is_pci) {
			if (dev_priv->ring_map)
				drm_core_ioremapfree(dev_priv->ring_map, dev);

			if (dev->agp_buffer_map) {
				drm_core_ioremapfree(dev->agp_buffer_map, dev);
				dev->agp_buffer_map = NULL;
			}
		}

		mach64_destroy_freelist(dev);

		drm_free(dev_priv, DRM_MEM_DRIVER);
		dev->dev_private = NULL;
	}

	return 0;
}

/*@}*/


/*******************************************************************/
/** \name IOCTL handlers */
/*@{*/

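/**
 * DMA init/cleanup ioctl handler: dispatches to mach64_do_dma_init() or
 * mach64_do_cleanup_dma() according to init->func.
 */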
int mach64_dma_init(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_mach64_init_t *init = data;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	switch (init->func) {
	case DRM_MACH64_INIT_DMA:
		return mach64_do_dma_init(dev, init);
	case DRM_MACH64_CLEANUP_DMA:
		return mach64_do_cleanup_dma(dev);
	}

	return -EINVAL;
}

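/**
 * Idle ioctl handler: wait for outstanding DMA to complete via
 * mach64_do_dma_idle().
 */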
int mach64_dma_idle(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return mach64_do_dma_idle(dev_priv);
}

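/**
 * Flush ioctl handler: flush the descriptor ring via mach64_do_dma_flush().
 */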
int mach64_dma_flush(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return mach64_do_dma_flush(dev_priv);
}

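/**
 * Reset ioctl handler: reset the engine via mach64_do_engine_reset().
 */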
int mach64_engine_reset(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return mach64_do_engine_reset(dev_priv);
}

/*@}*/


/*******************************************************************/
/** \name Freelist management */
/*@{*/

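/**
 * Allocate a freelist entry for every DMA buffer and put them all on the
 * free list.
 */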
int mach64_init_freelist(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_freelist_t *entry;
	struct list_head *ptr;
	int i;

	DRM_DEBUG("adding %d buffers to freelist\n", dma->buf_count);

	for (i = 0; i < dma->buf_count; i++) {
		if ((entry =
		     (drm_mach64_freelist_t *)
		     drm_alloc(sizeof(drm_mach64_freelist_t),
			       DRM_MEM_BUFLISTS)) == NULL)
			return -ENOMEM;
		memset(entry, 0, sizeof(drm_mach64_freelist_t));
		entry->buf = dma->buflist[i];
		ptr = &entry->list;
		list_add_tail(ptr, &dev_priv->free_list);
	}

	return 0;
}

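/**
 * Free every freelist entry on the pending, placeholder and free lists.
 */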
void mach64_destroy_freelist(struct drm_device * dev)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_freelist_t *entry;
	struct list_head *ptr;
	struct list_head *tmp;

	DRM_DEBUG("\n");

	list_for_each_safe(ptr, tmp, &dev_priv->pending) {
		list_del(ptr);
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		drm_free(entry, DRM_MEM_BUFLISTS);
	}
	list_for_each_safe(ptr, tmp, &dev_priv->placeholders) {
		list_del(ptr);
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		drm_free(entry, DRM_MEM_BUFLISTS);
	}

	list_for_each_safe(ptr, tmp, &dev_priv->free_list) {
		list_del(ptr);
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		drm_free(entry, DRM_MEM_BUFLISTS);
	}
}

/* IMPORTANT: This function should only be called when the engine is idle
 * or locked up, as it assumes all buffers in the pending list have been
 * completed by the hardware.
 */
int mach64_do_release_used_buffers(drm_mach64_private_t *dev_priv)
{
	struct list_head *ptr;
	struct list_head *tmp;
	drm_mach64_freelist_t *entry;
	int i;

	if (list_empty(&dev_priv->pending))
		return 0;

	/* Iterate the pending list and move every buffer flagged for discard
	 * back to the freelist.
	 */
	i = 0;
	list_for_each_safe(ptr, tmp, &dev_priv->pending) {
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		if (entry->discard) {
			entry->buf->pending = 0;
			list_del(ptr);
			list_add_tail(ptr, &dev_priv->free_list);
			i++;
		}
	}

	DRM_DEBUG("released %d buffers from pending list\n", i);

	return 0;
}

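/**
 * Try to reclaim a completed buffer from the pending list.
 *
 * Samples the ring head and, if the ring has drained, releases everything;
 * otherwise moves at most one discarded buffer whose descriptor is no longer
 * between head and tail back to the free list.
 *
 * \returns 0 if one or more buffers were freed, 1 if nothing could be
 * reclaimed yet, or -1 on an inconsistency.
 */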
static int mach64_do_reclaim_completed(drm_mach64_private_t *dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	struct list_head *ptr;
	struct list_head *tmp;
	drm_mach64_freelist_t *entry;
	u32 head, tail, ofs;

	mach64_ring_tick(dev_priv, ring);
	head = ring->head;
	tail = ring->tail;

	if (head == tail) {
#if MACH64_EXTRA_CHECKING
		if (MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE) {
			DRM_ERROR("Empty ring with non-idle engine!\n");
			mach64_dump_ring_info(dev_priv);
			return -1;
		}
#endif
		/* last pass is complete, so release everything */
		mach64_do_release_used_buffers(dev_priv);
		DRM_DEBUG("idle engine, freed all buffers.\n");
		if (list_empty(&dev_priv->free_list)) {
			DRM_ERROR("Freelist empty with idle engine\n");
			return -1;
		}
		return 0;
	}
	/* Look for a completed buffer and bail out of the loop
	 * as soon as we find one -- don't waste time trying
	 * to free extra bufs here, leave that to do_release_used_buffers
	 */
	list_for_each_safe(ptr, tmp, &dev_priv->pending) {
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		ofs = entry->ring_ofs;
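		/* The buffer is complete once its descriptor offset falls
		 * outside the live window between head and tail, taking ring
		 * wraparound into account.
		 */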
		if (entry->discard &&
		    ((head < tail && (ofs < head || ofs >= tail)) ||
		     (head > tail && (ofs < head && ofs >= tail)))) {
#if MACH64_EXTRA_CHECKING
			int i;

			for (i = head; i != tail; i = (i + 4) & ring->tail_mask) {
				u32 o1 = le32_to_cpu(((u32 *) ring->start)[i + 1]);
				u32 o2 = GETBUFADDR(entry->buf);

				if (o1 == o2) {
					DRM_ERROR
					    ("Attempting to free used buffer: "
					     "i=%d  buf=0x%08x\n",
					     i, o1);
					mach64_dump_ring_info(dev_priv);
					return -1;
				}
			}
#endif
			/* found a processed buffer */
			entry->buf->pending = 0;
			list_del(ptr);
			list_add_tail(ptr, &dev_priv->free_list);
			DRM_DEBUG("freed processed buffer (head=%d tail=%d "
				  "buf ring ofs=%d).\n",
				  head, tail, ofs);
			return 0;
		}
	}

	return 1;
}

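/**
 * Take a buffer from the free list for the client to fill.
 *
 * If the free list is empty, poll mach64_do_reclaim_completed() until a
 * pending buffer completes or the timeout expires.  The buffer's freelist
 * entry is parked on the placeholder list until mach64_freelist_put()
 * returns it.
 *
 * \returns the buffer, or NULL if none could be obtained.
 */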
struct drm_buf *mach64_freelist_get(drm_mach64_private_t *dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	drm_mach64_freelist_t *entry;
	struct list_head *ptr;
	int t;

	if (list_empty(&dev_priv->free_list)) {
		if (list_empty(&dev_priv->pending)) {
			DRM_ERROR
			    ("Couldn't get buffer - pending and free lists empty\n");
			t = 0;
			list_for_each(ptr, &dev_priv->placeholders) {
				t++;
			}
			DRM_INFO("Placeholders: %d\n", t);
			return NULL;
		}

		for (t = 0; t < dev_priv->usec_timeout; t++) {
			int ret;

			ret = mach64_do_reclaim_completed(dev_priv);
			if (ret == 0)
				goto _freelist_entry_found;
			if (ret < 0)
				return NULL;

			DRM_UDELAY(1);
		}
		mach64_dump_ring_info(dev_priv);
		DRM_ERROR
		    ("timeout waiting for buffers: ring head_addr: 0x%08x head: %d tail: %d\n",
		     ring->head_addr, ring->head, ring->tail);
		return NULL;
	}

      _freelist_entry_found:
	ptr = dev_priv->free_list.next;
	list_del(ptr);
	entry = list_entry(ptr, drm_mach64_freelist_t, list);
	entry->buf->used = 0;
	list_add_tail(ptr, &dev_priv->placeholders);
	return entry->buf;
}

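/**
 * Return an unused buffer obtained from mach64_freelist_get() directly to
 * the free list, reusing an entry from the placeholder list.
 */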
int mach64_freelist_put(drm_mach64_private_t *dev_priv, struct drm_buf *copy_buf)
{
	struct list_head *ptr;
	drm_mach64_freelist_t *entry;

#if MACH64_EXTRA_CHECKING
	list_for_each(ptr, &dev_priv->pending) {
		entry = list_entry(ptr, drm_mach64_freelist_t, list);
		if (copy_buf == entry->buf) {
			DRM_ERROR("Trying to release a pending buf\n");
			return -EFAULT;
		}
	}
#endif
	ptr = dev_priv->placeholders.next;
	entry = list_entry(ptr, drm_mach64_freelist_t, list);
	copy_buf->pending = 0;
	copy_buf->used = 0;
	entry->buf = copy_buf;
	entry->discard = 1;
	list_del(ptr);
	list_add_tail(ptr, &dev_priv->free_list);

	return 0;
}

/*@}*/


/*******************************************************************/
/** \name DMA buffer request and submission IOCTL handler */
/*@{*/

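/**
 * Hand out up to d->request_count free buffers to the client, copying each
 * buffer's index and size back to user space.
 */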
static int mach64_dma_get_buffers(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_dma * d)
{
	int i;
	struct drm_buf *buf;
	drm_mach64_private_t *dev_priv = dev->dev_private;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = mach64_freelist_get(dev_priv);
#if MACH64_EXTRA_CHECKING
		if (!buf)
			return -EFAULT;
#else
		if (!buf)
			return -EAGAIN;
#endif

		buf->file_priv = file_priv;

		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
				     sizeof(buf->idx)))
			return -EFAULT;
		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
				     sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}

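/**
 * Buffer-request ioctl handler (drmDMA): rejects buffer submission,
 * validates the request count and grants buffers via
 * mach64_dma_get_buffers().
 */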
int mach64_dma_buffers(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_dma *d = data;
	int ret = 0;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers.
	 */
	if (d->send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d->send_count);
		return -EINVAL;
	}

	/* We'll send you buffers.
	 */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d->request_count, dma->buf_count);
		return -EINVAL;
	}

	d->granted_count = 0;

	if (d->request_count) {
		ret = mach64_dma_get_buffers(dev, file_priv, d);
	}

	return ret;
}

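/**
 * Driver lastclose hook: tear down the DMA state when the device is closed
 * for the last time.
 */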
void mach64_driver_lastclose(struct drm_device * dev)
{
	mach64_do_cleanup_dma(dev);
}

/*@}*/