93777f7d06444bb4178e2df6d7435d4f1a773aa6
[dragonfly.git] / sys / dev / drm / radeon / radeon_state.c
1 /* radeon_state.c -- State support for Radeon -*- linux-c -*-
2  *
3  * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23  * DEALINGS IN THE SOFTWARE.
24  *
25  * Authors:
26  *    Gareth Hughes <gareth@valinux.com>
27  *    Kevin E. Martin <martin@valinux.com>
28  *
29  * $FreeBSD: src/sys/dev/drm/radeon_state.c,v 1.6.2.1 2003/04/26 07:05:30 anholt Exp $
30  * $DragonFly: src/sys/dev/drm/radeon/Attic/radeon_state.c,v 1.4 2005/02/15 18:31:48 joerg Exp $
31  */
32
33 #include "radeon.h"
34 #include "dev/drm/drmP.h"
35 #include "dev/drm/drm.h"
36 #include "dev/drm/drm_sarea.h"
37 #include "radeon_drm.h"
38 #include "radeon_drv.h"
39
40
41 /* ================================================================
42  * CP hardware state programming functions
43  */
44
45 static __inline__ void radeon_emit_clip_rect( drm_radeon_private_t *dev_priv,
46                                           drm_clip_rect_t *box )
47 {
48         RING_LOCALS;
49
50         DRM_DEBUG( "   box:  x1=%d y1=%d  x2=%d y2=%d\n",
51                    box->x1, box->y1, box->x2, box->y2 );
52
53         BEGIN_RING( 4 );
54         OUT_RING( CP_PACKET0( RADEON_RE_TOP_LEFT, 0 ) );
55         OUT_RING( (box->y1 << 16) | box->x1 );
56         OUT_RING( CP_PACKET0( RADEON_RE_WIDTH_HEIGHT, 0 ) );
57         OUT_RING( ((box->y2 - 1) << 16) | (box->x2 - 1) );
58         ADVANCE_RING();
59 }
60
61 /* Emit 1.1 state
62  */
63 static void radeon_emit_state( drm_radeon_private_t *dev_priv,
64                                drm_radeon_context_regs_t *ctx,
65                                drm_radeon_texture_regs_t *tex,
66                                unsigned int dirty )
67 {
68         RING_LOCALS;
69         DRM_DEBUG( "dirty=0x%08x\n", dirty );
70
71         if ( dirty & RADEON_UPLOAD_CONTEXT ) {
72                 BEGIN_RING( 14 );
73                 OUT_RING( CP_PACKET0( RADEON_PP_MISC, 6 ) );
74                 OUT_RING( ctx->pp_misc );
75                 OUT_RING( ctx->pp_fog_color );
76                 OUT_RING( ctx->re_solid_color );
77                 OUT_RING( ctx->rb3d_blendcntl );
78                 OUT_RING( ctx->rb3d_depthoffset );
79                 OUT_RING( ctx->rb3d_depthpitch );
80                 OUT_RING( ctx->rb3d_zstencilcntl );
81                 OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 2 ) );
82                 OUT_RING( ctx->pp_cntl );
83                 OUT_RING( ctx->rb3d_cntl );
84                 OUT_RING( ctx->rb3d_coloroffset );
85                 OUT_RING( CP_PACKET0( RADEON_RB3D_COLORPITCH, 0 ) );
86                 OUT_RING( ctx->rb3d_colorpitch );
87                 ADVANCE_RING();
88         }
89
90         if ( dirty & RADEON_UPLOAD_VERTFMT ) {
91                 BEGIN_RING( 2 );
92                 OUT_RING( CP_PACKET0( RADEON_SE_COORD_FMT, 0 ) );
93                 OUT_RING( ctx->se_coord_fmt );
94                 ADVANCE_RING();
95         }
96
97         if ( dirty & RADEON_UPLOAD_LINE ) {
98                 BEGIN_RING( 5 );
99                 OUT_RING( CP_PACKET0( RADEON_RE_LINE_PATTERN, 1 ) );
100                 OUT_RING( ctx->re_line_pattern );
101                 OUT_RING( ctx->re_line_state );
102                 OUT_RING( CP_PACKET0( RADEON_SE_LINE_WIDTH, 0 ) );
103                 OUT_RING( ctx->se_line_width );
104                 ADVANCE_RING();
105         }
106
107         if ( dirty & RADEON_UPLOAD_BUMPMAP ) {
108                 BEGIN_RING( 5 );
109                 OUT_RING( CP_PACKET0( RADEON_PP_LUM_MATRIX, 0 ) );
110                 OUT_RING( ctx->pp_lum_matrix );
111                 OUT_RING( CP_PACKET0( RADEON_PP_ROT_MATRIX_0, 1 ) );
112                 OUT_RING( ctx->pp_rot_matrix_0 );
113                 OUT_RING( ctx->pp_rot_matrix_1 );
114                 ADVANCE_RING();
115         }
116
117         if ( dirty & RADEON_UPLOAD_MASKS ) {
118                 BEGIN_RING( 4 );
119                 OUT_RING( CP_PACKET0( RADEON_RB3D_STENCILREFMASK, 2 ) );
120                 OUT_RING( ctx->rb3d_stencilrefmask );
121                 OUT_RING( ctx->rb3d_ropcntl );
122                 OUT_RING( ctx->rb3d_planemask );
123                 ADVANCE_RING();
124         }
125
126         if ( dirty & RADEON_UPLOAD_VIEWPORT ) {
127                 BEGIN_RING( 7 );
128                 OUT_RING( CP_PACKET0( RADEON_SE_VPORT_XSCALE, 5 ) );
129                 OUT_RING( ctx->se_vport_xscale );
130                 OUT_RING( ctx->se_vport_xoffset );
131                 OUT_RING( ctx->se_vport_yscale );
132                 OUT_RING( ctx->se_vport_yoffset );
133                 OUT_RING( ctx->se_vport_zscale );
134                 OUT_RING( ctx->se_vport_zoffset );
135                 ADVANCE_RING();
136         }
137
138         if ( dirty & RADEON_UPLOAD_SETUP ) {
139                 BEGIN_RING( 4 );
140                 OUT_RING( CP_PACKET0( RADEON_SE_CNTL, 0 ) );
141                 OUT_RING( ctx->se_cntl );
142                 OUT_RING( CP_PACKET0( RADEON_SE_CNTL_STATUS, 0 ) );
143                 OUT_RING( ctx->se_cntl_status );
144                 ADVANCE_RING();
145         }
146
147         if ( dirty & RADEON_UPLOAD_MISC ) {
148                 BEGIN_RING( 2 );
149                 OUT_RING( CP_PACKET0( RADEON_RE_MISC, 0 ) );
150                 OUT_RING( ctx->re_misc );
151                 ADVANCE_RING();
152         }
153
154         if ( dirty & RADEON_UPLOAD_TEX0 ) {
155                 BEGIN_RING( 9 );
156                 OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_0, 5 ) );
157                 OUT_RING( tex[0].pp_txfilter );
158                 OUT_RING( tex[0].pp_txformat );
159                 OUT_RING( tex[0].pp_txoffset );
160                 OUT_RING( tex[0].pp_txcblend );
161                 OUT_RING( tex[0].pp_txablend );
162                 OUT_RING( tex[0].pp_tfactor );
163                 OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_0, 0 ) );
164                 OUT_RING( tex[0].pp_border_color );
165                 ADVANCE_RING();
166         }
167
168         if ( dirty & RADEON_UPLOAD_TEX1 ) {
169                 BEGIN_RING( 9 );
170                 OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_1, 5 ) );
171                 OUT_RING( tex[1].pp_txfilter );
172                 OUT_RING( tex[1].pp_txformat );
173                 OUT_RING( tex[1].pp_txoffset );
174                 OUT_RING( tex[1].pp_txcblend );
175                 OUT_RING( tex[1].pp_txablend );
176                 OUT_RING( tex[1].pp_tfactor );
177                 OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_1, 0 ) );
178                 OUT_RING( tex[1].pp_border_color );
179                 ADVANCE_RING();
180         }
181
182         if ( dirty & RADEON_UPLOAD_TEX2 ) {
183                 BEGIN_RING( 9 );
184                 OUT_RING( CP_PACKET0( RADEON_PP_TXFILTER_2, 5 ) );
185                 OUT_RING( tex[2].pp_txfilter );
186                 OUT_RING( tex[2].pp_txformat );
187                 OUT_RING( tex[2].pp_txoffset );
188                 OUT_RING( tex[2].pp_txcblend );
189                 OUT_RING( tex[2].pp_txablend );
190                 OUT_RING( tex[2].pp_tfactor );
191                 OUT_RING( CP_PACKET0( RADEON_PP_BORDER_COLOR_2, 0 ) );
192                 OUT_RING( tex[2].pp_border_color );
193                 ADVANCE_RING();
194         }
195 }
196
197 /* Emit 1.2 state
198  */
199 static void radeon_emit_state2( drm_radeon_private_t *dev_priv,
200                                 drm_radeon_state_t *state )
201 {
202         RING_LOCALS;
203
204         if (state->dirty & RADEON_UPLOAD_ZBIAS) {
205                 BEGIN_RING( 3 );
206                 OUT_RING( CP_PACKET0( RADEON_SE_ZBIAS_FACTOR, 1 ) );
207                 OUT_RING( state->context2.se_zbias_factor ); 
208                 OUT_RING( state->context2.se_zbias_constant ); 
209                 ADVANCE_RING();
210         }
211
212         radeon_emit_state( dev_priv, &state->context, 
213                            state->tex, state->dirty );
214 }
215
216 /* New (1.3) state mechanism.  3 commands (packet, scalar, vector) in
217  * 1.3 cmdbuffers allow all previous state to be updated as well as
218  * the tcl scalar and vector areas.  
219  */
220 static struct { 
221         int start; 
222         int len; 
223         const char *name;
224 } packet[RADEON_MAX_STATE_PACKETS] = {
225         { RADEON_PP_MISC,7,"RADEON_PP_MISC" },
226         { RADEON_PP_CNTL,3,"RADEON_PP_CNTL" },
227         { RADEON_RB3D_COLORPITCH,1,"RADEON_RB3D_COLORPITCH" },
228         { RADEON_RE_LINE_PATTERN,2,"RADEON_RE_LINE_PATTERN" },
229         { RADEON_SE_LINE_WIDTH,1,"RADEON_SE_LINE_WIDTH" },
230         { RADEON_PP_LUM_MATRIX,1,"RADEON_PP_LUM_MATRIX" },
231         { RADEON_PP_ROT_MATRIX_0,2,"RADEON_PP_ROT_MATRIX_0" },
232         { RADEON_RB3D_STENCILREFMASK,3,"RADEON_RB3D_STENCILREFMASK" },
233         { RADEON_SE_VPORT_XSCALE,6,"RADEON_SE_VPORT_XSCALE" },
234         { RADEON_SE_CNTL,2,"RADEON_SE_CNTL" },
235         { RADEON_SE_CNTL_STATUS,1,"RADEON_SE_CNTL_STATUS" },
236         { RADEON_RE_MISC,1,"RADEON_RE_MISC" },
237         { RADEON_PP_TXFILTER_0,6,"RADEON_PP_TXFILTER_0" },
238         { RADEON_PP_BORDER_COLOR_0,1,"RADEON_PP_BORDER_COLOR_0" },
239         { RADEON_PP_TXFILTER_1,6,"RADEON_PP_TXFILTER_1" },
240         { RADEON_PP_BORDER_COLOR_1,1,"RADEON_PP_BORDER_COLOR_1" },
241         { RADEON_PP_TXFILTER_2,6,"RADEON_PP_TXFILTER_2" },
242         { RADEON_PP_BORDER_COLOR_2,1,"RADEON_PP_BORDER_COLOR_2" },
243         { RADEON_SE_ZBIAS_FACTOR,2,"RADEON_SE_ZBIAS_FACTOR" },
244         { RADEON_SE_TCL_OUTPUT_VTX_FMT,11,"RADEON_SE_TCL_OUTPUT_VTX_FMT" },
245         { RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED,17,"RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED" },
246         { R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0" },
247         { R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1" },
248         { R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2" },
249         { R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3" },
250         { R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4" },
251         { R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5" },
252         { R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6" },
253         { R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7" },
254         { R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0" },
255         { R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0" },
256         { R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0" },
257         { R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL" },
258         { R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0" },
259         { R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2" },
260         { R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL" },
261         { R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0" },
262         { R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1" },
263         { R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2" },
264         { R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3" },
265         { R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4" },
266         { R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5" },
267         { R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0" },
268         { R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1" },
269         { R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2" },
270         { R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3" },
271         { R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4" },
272         { R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5" },
273         { R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL" },
274         { R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL" },
275         { R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3" },
276         { R200_PP_CNTL_X, 1, "R200_PP_CNTL_X" }, 
277         { R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET" }, 
278         { R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL" }, 
279         { R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0" }, 
280         { R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1" }, 
281         { R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2" }, 
282         { R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS" }, 
283         { R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL" }, 
284         { R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE" }, 
285         { R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4, "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0" },
286         { R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0" }, /* 61 */
287         { R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0" }, /* 62 */
288         { R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1" },
289         { R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1" },
290         { R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2" },
291         { R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2" },
292         { R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3" },
293         { R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3" },
294         { R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4" },
295         { R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4" },
296         { R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5" },
297         { R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5" },
298 };
299
300
301
302 /* ================================================================
303  * Performance monitoring functions
304  */
305
306 static void radeon_clear_box( drm_radeon_private_t *dev_priv,
307                               int x, int y, int w, int h,
308                               int r, int g, int b )
309 {
310         u32 color;
311         RING_LOCALS;
312
313         x += dev_priv->sarea_priv->boxes[0].x1;
314         y += dev_priv->sarea_priv->boxes[0].y1;
315
316         switch ( dev_priv->color_fmt ) {
317         case RADEON_COLOR_FORMAT_RGB565:
318                 color = (((r & 0xf8) << 8) |
319                          ((g & 0xfc) << 3) |
320                          ((b & 0xf8) >> 3));
321                 break;
322         case RADEON_COLOR_FORMAT_ARGB8888:
323         default:
324                 color = (((0xff) << 24) | (r << 16) | (g <<  8) | b);
325                 break;
326         }
327
328         BEGIN_RING( 4 );
329         RADEON_WAIT_UNTIL_3D_IDLE();            
330         OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
331         OUT_RING( 0xffffffff );
332         ADVANCE_RING();
333
334         BEGIN_RING( 6 );
335
336         OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
337         OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
338                   RADEON_GMC_BRUSH_SOLID_COLOR |
339                   (dev_priv->color_fmt << 8) |
340                   RADEON_GMC_SRC_DATATYPE_COLOR |
341                   RADEON_ROP3_P |
342                   RADEON_GMC_CLR_CMP_CNTL_DIS );
343
344         if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) { 
345                 OUT_RING( dev_priv->front_pitch_offset );
346         } else {         
347                 OUT_RING( dev_priv->back_pitch_offset );
348         } 
349
350         OUT_RING( color );
351
352         OUT_RING( (x << 16) | y );
353         OUT_RING( (w << 16) | h );
354
355         ADVANCE_RING();
356 }
357
358 static void radeon_cp_performance_boxes( drm_radeon_private_t *dev_priv )
359 {
360         /* Collapse various things into a wait flag -- trying to
361          * guess if userspase slept -- better just to have them tell us.
362          */
363         if (dev_priv->stats.last_frame_reads > 1 ||
364             dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
365                 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
366         }
367
368         if (dev_priv->stats.freelist_loops) {
369                 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
370         }
371
372         /* Purple box for page flipping
373          */
374         if ( dev_priv->stats.boxes & RADEON_BOX_FLIP ) 
375                 radeon_clear_box( dev_priv, 4, 4, 8, 8, 255, 0, 255 );
376
377         /* Red box if we have to wait for idle at any point
378          */
379         if ( dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE ) 
380                 radeon_clear_box( dev_priv, 16, 4, 8, 8, 255, 0, 0 );
381
382         /* Blue box: lost context?
383          */
384
385         /* Yellow box for texture swaps
386          */
387         if ( dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD ) 
388                 radeon_clear_box( dev_priv, 40, 4, 8, 8, 255, 255, 0 );
389
390         /* Green box if hardware never idles (as far as we can tell)
391          */
392         if ( !(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE) ) 
393                 radeon_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 );
394
395
396         /* Draw bars indicating number of buffers allocated 
397          * (not a great measure, easily confused)
398          */
399         if (dev_priv->stats.requested_bufs) {
400                 if (dev_priv->stats.requested_bufs > 100)
401                         dev_priv->stats.requested_bufs = 100;
402
403                 radeon_clear_box( dev_priv, 4, 16,  
404                                   dev_priv->stats.requested_bufs, 4,
405                                   196, 128, 128 );
406         }
407
408         memset( &dev_priv->stats, 0, sizeof(dev_priv->stats) );
409
410 }
411 /* ================================================================
412  * CP command dispatch functions
413  */
414
415 static void radeon_cp_dispatch_clear( drm_device_t *dev,
416                                       drm_radeon_clear_t *clear,
417                                       drm_radeon_clear_rect_t *depth_boxes )
418 {
419         drm_radeon_private_t *dev_priv = dev->dev_private;
420         drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
421         drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
422         int nbox = sarea_priv->nbox;
423         drm_clip_rect_t *pbox = sarea_priv->boxes;
424         unsigned int flags = clear->flags;
425         u32 rb3d_cntl = 0, rb3d_stencilrefmask= 0;
426         int i;
427         RING_LOCALS;
428         DRM_DEBUG( "flags = 0x%x\n", flags );
429
430         dev_priv->stats.clears++;
431
432         if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
433                 unsigned int tmp = flags;
434
435                 flags &= ~(RADEON_FRONT | RADEON_BACK);
436                 if ( tmp & RADEON_FRONT ) flags |= RADEON_BACK;
437                 if ( tmp & RADEON_BACK )  flags |= RADEON_FRONT;
438         }
439
440         if ( flags & (RADEON_FRONT | RADEON_BACK) ) {
441
442                 BEGIN_RING( 4 );
443
444                 /* Ensure the 3D stream is idle before doing a
445                  * 2D fill to clear the front or back buffer.
446                  */
447                 RADEON_WAIT_UNTIL_3D_IDLE();
448                 
449                 OUT_RING( CP_PACKET0( RADEON_DP_WRITE_MASK, 0 ) );
450                 OUT_RING( clear->color_mask );
451
452                 ADVANCE_RING();
453
454                 /* Make sure we restore the 3D state next time.
455                  */
456                 dev_priv->sarea_priv->ctx_owner = 0;
457
458                 for ( i = 0 ; i < nbox ; i++ ) {
459                         int x = pbox[i].x1;
460                         int y = pbox[i].y1;
461                         int w = pbox[i].x2 - x;
462                         int h = pbox[i].y2 - y;
463
464                         DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n",
465                                    x, y, w, h, flags );
466
467                         if ( flags & RADEON_FRONT ) {
468                                 BEGIN_RING( 6 );
469                                 
470                                 OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
471                                 OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
472                                           RADEON_GMC_BRUSH_SOLID_COLOR |
473                                           (dev_priv->color_fmt << 8) |
474                                           RADEON_GMC_SRC_DATATYPE_COLOR |
475                                           RADEON_ROP3_P |
476                                           RADEON_GMC_CLR_CMP_CNTL_DIS );
477
478                                 OUT_RING( dev_priv->front_pitch_offset );
479                                 OUT_RING( clear->clear_color );
480                                 
481                                 OUT_RING( (x << 16) | y );
482                                 OUT_RING( (w << 16) | h );
483                                 
484                                 ADVANCE_RING();
485                         }
486                         
487                         if ( flags & RADEON_BACK ) {
488                                 BEGIN_RING( 6 );
489                                 
490                                 OUT_RING( CP_PACKET3( RADEON_CNTL_PAINT_MULTI, 4 ) );
491                                 OUT_RING( RADEON_GMC_DST_PITCH_OFFSET_CNTL |
492                                           RADEON_GMC_BRUSH_SOLID_COLOR |
493                                           (dev_priv->color_fmt << 8) |
494                                           RADEON_GMC_SRC_DATATYPE_COLOR |
495                                           RADEON_ROP3_P |
496                                           RADEON_GMC_CLR_CMP_CNTL_DIS );
497                                 
498                                 OUT_RING( dev_priv->back_pitch_offset );
499                                 OUT_RING( clear->clear_color );
500
501                                 OUT_RING( (x << 16) | y );
502                                 OUT_RING( (w << 16) | h );
503
504                                 ADVANCE_RING();
505                         }
506                 }
507         }
508
509         /* We have to clear the depth and/or stencil buffers by
510          * rendering a quad into just those buffers.  Thus, we have to
511          * make sure the 3D engine is configured correctly.
512          */
513         if ( dev_priv->is_r200 &&
514              (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) {
515
516                 int tempPP_CNTL;
517                 int tempRE_CNTL;
518                 int tempRB3D_CNTL;
519                 int tempRB3D_ZSTENCILCNTL;
520                 int tempRB3D_STENCILREFMASK;
521                 int tempRB3D_PLANEMASK;
522                 int tempSE_CNTL;
523                 int tempSE_VTE_CNTL;
524                 int tempSE_VTX_FMT_0;
525                 int tempSE_VTX_FMT_1;
526                 int tempSE_VAP_CNTL;
527                 int tempRE_AUX_SCISSOR_CNTL;
528
529                 tempPP_CNTL = 0;
530                 tempRE_CNTL = 0;
531
532                 tempRB3D_CNTL = depth_clear->rb3d_cntl;
533                 tempRB3D_CNTL &= ~(1<<15); /* unset radeon magic flag */
534
535                 tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
536                 tempRB3D_STENCILREFMASK = 0x0;
537
538                 tempSE_CNTL = depth_clear->se_cntl;
539
540
541
542                 /* Disable TCL */
543
544                 tempSE_VAP_CNTL = (/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK |  */
545                                    (0x9 << SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
546
547                 tempRB3D_PLANEMASK = 0x0;
548
549                 tempRE_AUX_SCISSOR_CNTL = 0x0;
550
551                 tempSE_VTE_CNTL =
552                         SE_VTE_CNTL__VTX_XY_FMT_MASK |
553                         SE_VTE_CNTL__VTX_Z_FMT_MASK;
554
555                 /* Vertex format (X, Y, Z, W)*/
556                 tempSE_VTX_FMT_0 =
557                         SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
558                         SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
559                 tempSE_VTX_FMT_1 = 0x0;
560
561
562                 /* 
563                  * Depth buffer specific enables 
564                  */
565                 if (flags & RADEON_DEPTH) {
566                         /* Enable depth buffer */
567                         tempRB3D_CNTL |= RADEON_Z_ENABLE;
568                 } else {
569                         /* Disable depth buffer */
570                         tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
571                 }
572
573                 /* 
574                  * Stencil buffer specific enables
575                  */
576                 if ( flags & RADEON_STENCIL ) {
577                         tempRB3D_CNTL |=  RADEON_STENCIL_ENABLE;
578                         tempRB3D_STENCILREFMASK = clear->depth_mask; 
579                 } else {
580                         tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
581                         tempRB3D_STENCILREFMASK = 0x00000000;
582                 }
583
584                 BEGIN_RING( 26 );
585                 RADEON_WAIT_UNTIL_2D_IDLE();
586
587                 OUT_RING_REG( RADEON_PP_CNTL, tempPP_CNTL );
588                 OUT_RING_REG( R200_RE_CNTL, tempRE_CNTL );
589                 OUT_RING_REG( RADEON_RB3D_CNTL, tempRB3D_CNTL );
590                 OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
591                               tempRB3D_ZSTENCILCNTL );
592                 OUT_RING_REG( RADEON_RB3D_STENCILREFMASK, 
593                               tempRB3D_STENCILREFMASK );
594                 OUT_RING_REG( RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK );
595                 OUT_RING_REG( RADEON_SE_CNTL, tempSE_CNTL );
596                 OUT_RING_REG( R200_SE_VTE_CNTL, tempSE_VTE_CNTL );
597                 OUT_RING_REG( R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0 );
598                 OUT_RING_REG( R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1 );
599                 OUT_RING_REG( R200_SE_VAP_CNTL, tempSE_VAP_CNTL );
600                 OUT_RING_REG( R200_RE_AUX_SCISSOR_CNTL, 
601                               tempRE_AUX_SCISSOR_CNTL );
602                 ADVANCE_RING();
603
604                 /* Make sure we restore the 3D state next time.
605                  */
606                 dev_priv->sarea_priv->ctx_owner = 0;
607
608                 for ( i = 0 ; i < nbox ; i++ ) {
609                         
610                         /* Funny that this should be required -- 
611                          *  sets top-left?
612                          */
613                         radeon_emit_clip_rect( dev_priv,
614                                                &sarea_priv->boxes[i] );
615
616                         BEGIN_RING( 14 );
617                         OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 12 ) );
618                         OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST |
619                                    RADEON_PRIM_WALK_RING |
620                                    (3 << RADEON_NUM_VERTICES_SHIFT)) );
621                         OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
622                         OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
623                         OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
624                         OUT_RING( 0x3f800000 );
625                         OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
626                         OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
627                         OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
628                         OUT_RING( 0x3f800000 );
629                         OUT_RING( depth_boxes[i].ui[CLEAR_X2] );
630                         OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
631                         OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
632                         OUT_RING( 0x3f800000 );
633                         ADVANCE_RING();
634                 }
635         } 
636         else if ( (flags & (RADEON_DEPTH | RADEON_STENCIL)) ) {
637
638                 rb3d_cntl = depth_clear->rb3d_cntl;
639
640                 if ( flags & RADEON_DEPTH ) {
641                         rb3d_cntl |=  RADEON_Z_ENABLE;
642                 } else {
643                         rb3d_cntl &= ~RADEON_Z_ENABLE;
644                 }
645
646                 if ( flags & RADEON_STENCIL ) {
647                         rb3d_cntl |=  RADEON_STENCIL_ENABLE;
648                         rb3d_stencilrefmask = clear->depth_mask; /* misnamed field */
649                 } else {
650                         rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
651                         rb3d_stencilrefmask = 0x00000000;
652                 }
653
654                 BEGIN_RING( 13 );
655                 RADEON_WAIT_UNTIL_2D_IDLE();
656
657                 OUT_RING( CP_PACKET0( RADEON_PP_CNTL, 1 ) );
658                 OUT_RING( 0x00000000 );
659                 OUT_RING( rb3d_cntl );
660                 
661                 OUT_RING_REG( RADEON_RB3D_ZSTENCILCNTL,
662                               depth_clear->rb3d_zstencilcntl );
663                 OUT_RING_REG( RADEON_RB3D_STENCILREFMASK,
664                               rb3d_stencilrefmask );
665                 OUT_RING_REG( RADEON_RB3D_PLANEMASK,
666                               0x00000000 );
667                 OUT_RING_REG( RADEON_SE_CNTL,
668                               depth_clear->se_cntl );
669                 ADVANCE_RING();
670
671                 /* Make sure we restore the 3D state next time.
672                  */
673                 dev_priv->sarea_priv->ctx_owner = 0;
674
675                 for ( i = 0 ; i < nbox ; i++ ) {
676                         
677                         /* Funny that this should be required -- 
678                          *  sets top-left?
679                          */
680                         radeon_emit_clip_rect( dev_priv,
681                                                &sarea_priv->boxes[i] );
682
683                         BEGIN_RING( 15 );
684
685                         OUT_RING( CP_PACKET3( RADEON_3D_DRAW_IMMD, 13 ) );
686                         OUT_RING( RADEON_VTX_Z_PRESENT |
687                                   RADEON_VTX_PKCOLOR_PRESENT);
688                         OUT_RING( (RADEON_PRIM_TYPE_RECT_LIST |
689                                    RADEON_PRIM_WALK_RING |
690                                    RADEON_MAOS_ENABLE |
691                                    RADEON_VTX_FMT_RADEON_MODE |
692                                    (3 << RADEON_NUM_VERTICES_SHIFT)) );
693
694
695                         OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
696                         OUT_RING( depth_boxes[i].ui[CLEAR_Y1] );
697                         OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
698                         OUT_RING( 0x0 );
699
700                         OUT_RING( depth_boxes[i].ui[CLEAR_X1] );
701                         OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
702                         OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
703                         OUT_RING( 0x0 );
704
705                         OUT_RING( depth_boxes[i].ui[CLEAR_X2] );
706                         OUT_RING( depth_boxes[i].ui[CLEAR_Y2] );
707                         OUT_RING( depth_boxes[i].ui[CLEAR_DEPTH] );
708                         OUT_RING( 0x0 );
709
710                         ADVANCE_RING();
711                 }
712         }
713
714         /* Increment the clear counter.  The client-side 3D driver must
715          * wait on this value before performing the clear ioctl.  We
716          * need this because the card's so damned fast...
717          */
718         dev_priv->sarea_priv->last_clear++;
719
720         BEGIN_RING( 4 );
721
722         RADEON_CLEAR_AGE( dev_priv->sarea_priv->last_clear );
723         RADEON_WAIT_UNTIL_IDLE();
724
725         ADVANCE_RING();
726 }
727
728 static void radeon_cp_dispatch_swap( drm_device_t *dev )
729 {
730         drm_radeon_private_t *dev_priv = dev->dev_private;
731         drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
732         int nbox = sarea_priv->nbox;
733         drm_clip_rect_t *pbox = sarea_priv->boxes;
734         int i;
735         RING_LOCALS;
736         DRM_DEBUG( "\n" );
737
738         /* Do some trivial performance monitoring...
739          */
740         if (dev_priv->do_boxes)
741                 radeon_cp_performance_boxes( dev_priv );
742
743
744         /* Wait for the 3D stream to idle before dispatching the bitblt.
745          * This will prevent data corruption between the two streams.
746          */
747         BEGIN_RING( 2 );
748
749         RADEON_WAIT_UNTIL_3D_IDLE();
750
751         ADVANCE_RING();
752
753         for ( i = 0 ; i < nbox ; i++ ) {
754                 int x = pbox[i].x1;
755                 int y = pbox[i].y1;
756                 int w = pbox[i].x2 - x;
757                 int h = pbox[i].y2 - y;
758
759                 DRM_DEBUG( "dispatch swap %d,%d-%d,%d\n",
760                            x, y, w, h );
761
762                 BEGIN_RING( 7 );
763
764                 OUT_RING( CP_PACKET3( RADEON_CNTL_BITBLT_MULTI, 5 ) );
765                 OUT_RING( RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
766                           RADEON_GMC_DST_PITCH_OFFSET_CNTL |
767                           RADEON_GMC_BRUSH_NONE |
768                           (dev_priv->color_fmt << 8) |
769                           RADEON_GMC_SRC_DATATYPE_COLOR |
770                           RADEON_ROP3_S |
771                           RADEON_DP_SRC_SOURCE_MEMORY |
772                           RADEON_GMC_CLR_CMP_CNTL_DIS |
773                           RADEON_GMC_WR_MSK_DIS );
774                 
775                 /* Make this work even if front & back are flipped:
776                  */
777                 if (dev_priv->current_page == 0) {
778                         OUT_RING( dev_priv->back_pitch_offset );
779                         OUT_RING( dev_priv->front_pitch_offset );
780                 } 
781                 else {
782                         OUT_RING( dev_priv->front_pitch_offset );
783                         OUT_RING( dev_priv->back_pitch_offset );
784                 }
785
786                 OUT_RING( (x << 16) | y );
787                 OUT_RING( (x << 16) | y );
788                 OUT_RING( (w << 16) | h );
789
790                 ADVANCE_RING();
791         }
792
793         /* Increment the frame counter.  The client-side 3D driver must
794          * throttle the framerate by waiting for this value before
795          * performing the swapbuffer ioctl.
796          */
797         dev_priv->sarea_priv->last_frame++;
798
799         BEGIN_RING( 4 );
800
801         RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );
802         RADEON_WAIT_UNTIL_2D_IDLE();
803
804         ADVANCE_RING();
805 }
806
807 static void radeon_cp_dispatch_flip( drm_device_t *dev )
808 {
809         drm_radeon_private_t *dev_priv = dev->dev_private;
810         drm_sarea_t *sarea = (drm_sarea_t *)dev_priv->sarea->handle;
811         int offset = (dev_priv->current_page == 1)
812                    ? dev_priv->front_offset : dev_priv->back_offset;
813         RING_LOCALS;
814         DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n", 
815                 __FUNCTION__, 
816                 dev_priv->current_page,
817                 dev_priv->sarea_priv->pfCurrentPage);
818
819         /* Do some trivial performance monitoring...
820          */
821         if (dev_priv->do_boxes) {
822                 dev_priv->stats.boxes |= RADEON_BOX_FLIP;
823                 radeon_cp_performance_boxes( dev_priv );
824         }
825
826         /* Update the frame offsets for both CRTCs
827          */
828         BEGIN_RING( 6 );
829
830         RADEON_WAIT_UNTIL_3D_IDLE();
831         OUT_RING_REG( RADEON_CRTC_OFFSET, ( ( sarea->frame.y * dev_priv->front_pitch
832                                               + sarea->frame.x 
833                                               * ( dev_priv->color_fmt - 2 ) ) & ~7 )
834                                           + offset );
835         OUT_RING_REG( RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
836                                            + offset );
837
838         ADVANCE_RING();
839
840         /* Increment the frame counter.  The client-side 3D driver must
841          * throttle the framerate by waiting for this value before
842          * performing the swapbuffer ioctl.
843          */
844         dev_priv->sarea_priv->last_frame++;
845         dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
846                                               1 - dev_priv->current_page;
847
848         BEGIN_RING( 2 );
849
850         RADEON_FRAME_AGE( dev_priv->sarea_priv->last_frame );
851
852         ADVANCE_RING();
853 }
854
855 static int bad_prim_vertex_nr( int primitive, int nr )
856 {
857         switch (primitive & RADEON_PRIM_TYPE_MASK) {
858         case RADEON_PRIM_TYPE_NONE:
859         case RADEON_PRIM_TYPE_POINT:
860                 return nr < 1;
861         case RADEON_PRIM_TYPE_LINE:
862                 return (nr & 1) || nr == 0;
863         case RADEON_PRIM_TYPE_LINE_STRIP:
864                 return nr < 2;
865         case RADEON_PRIM_TYPE_TRI_LIST:
866         case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
867         case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
868         case RADEON_PRIM_TYPE_RECT_LIST:
869                 return nr % 3 || nr == 0;
870         case RADEON_PRIM_TYPE_TRI_FAN:
871         case RADEON_PRIM_TYPE_TRI_STRIP:
872                 return nr < 3;
873         default:
874                 return 1;
875         }       
876 }
877
878
879
880 typedef struct {
881         unsigned int start;
882         unsigned int finish;
883         unsigned int prim;
884         unsigned int numverts;
885         unsigned int offset;   
886         unsigned int vc_format;
887 } drm_radeon_tcl_prim_t;
888
889 static void radeon_cp_dispatch_vertex( drm_device_t *dev,
890                                        drm_buf_t *buf,
891                                        drm_radeon_tcl_prim_t *prim,
892                                        drm_clip_rect_t *boxes,
893                                        int nbox )
894
895 {
896         drm_radeon_private_t *dev_priv = dev->dev_private;
897         drm_clip_rect_t box;
898         int offset = dev_priv->agp_buffers_offset + buf->offset + prim->start;
899         int numverts = (int)prim->numverts;
900         int i = 0;
901         RING_LOCALS;
902
903         DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
904                   prim->prim,
905                   prim->vc_format,
906                   prim->start,
907                   prim->finish,
908                   prim->numverts);
909
910         if (bad_prim_vertex_nr( prim->prim, prim->numverts )) {
911                 DRM_ERROR( "bad prim %x numverts %d\n", 
912                            prim->prim, prim->numverts );
913                 return;
914         }
915
916         do {
917                 /* Emit the next cliprect */
918                 if ( i < nbox ) {
919                         if (DRM_COPY_FROM_USER_UNCHECKED( &box, &boxes[i], sizeof(box) ))
920                                 return;
921
922                         radeon_emit_clip_rect( dev_priv, &box );
923                 }
924
925                 /* Emit the vertex buffer rendering commands */
926                 BEGIN_RING( 5 );
927
928                 OUT_RING( CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, 3 ) );
929                 OUT_RING( offset );
930                 OUT_RING( numverts );
931                 OUT_RING( prim->vc_format );
932                 OUT_RING( prim->prim | RADEON_PRIM_WALK_LIST |
933                           RADEON_COLOR_ORDER_RGBA |
934                           RADEON_VTX_FMT_RADEON_MODE |
935                           (numverts << RADEON_NUM_VERTICES_SHIFT) );
936
937                 ADVANCE_RING();
938
939                 i++;
940         } while ( i < nbox );
941 }
942
943
944
945 static void radeon_cp_discard_buffer( drm_device_t *dev, drm_buf_t *buf )
946 {
947         drm_radeon_private_t *dev_priv = dev->dev_private;
948         drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
949         RING_LOCALS;
950
951         buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
952
953         /* Emit the vertex buffer age */
954         BEGIN_RING( 2 );
955         RADEON_DISPATCH_AGE( buf_priv->age );
956         ADVANCE_RING();
957
958         buf->pending = 1;
959         buf->used = 0;
960 }
961
962 static void radeon_cp_dispatch_indirect( drm_device_t *dev,
963                                          drm_buf_t *buf,
964                                          int start, int end )
965 {
966         drm_radeon_private_t *dev_priv = dev->dev_private;
967         RING_LOCALS;
968         DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
969                    buf->idx, start, end );
970
971         if ( start != end ) {
972                 int offset = (dev_priv->agp_buffers_offset
973                               + buf->offset + start);
974                 int dwords = (end - start + 3) / sizeof(u32);
975
976                 /* Indirect buffer data must be an even number of
977                  * dwords, so if we've been given an odd number we must
978                  * pad the data with a Type-2 CP packet.
979                  */
980                 if ( dwords & 1 ) {
981                         u32 *data = (u32 *)
982                                 ((char *)dev_priv->buffers->handle
983                                  + buf->offset + start);
984                         data[dwords++] = RADEON_CP_PACKET2;
985                 }
986
987                 /* Fire off the indirect buffer */
988                 BEGIN_RING( 3 );
989
990                 OUT_RING( CP_PACKET0( RADEON_CP_IB_BASE, 1 ) );
991                 OUT_RING( offset );
992                 OUT_RING( dwords );
993
994                 ADVANCE_RING();
995         }
996 }
997
998
999 static void radeon_cp_dispatch_indices( drm_device_t *dev,
1000                                         drm_buf_t *elt_buf,
1001                                         drm_radeon_tcl_prim_t *prim, 
1002                                         drm_clip_rect_t *boxes,
1003                                         int nbox )
1004 {
1005         drm_radeon_private_t *dev_priv = dev->dev_private;
1006         drm_clip_rect_t box;
1007         int offset = dev_priv->agp_buffers_offset + prim->offset;
1008         u32 *data;
1009         int dwords;
1010         int i = 0;
1011         int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
1012         int count = (prim->finish - start) / sizeof(u16);
1013
1014         DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
1015                   prim->prim,
1016                   prim->vc_format,
1017                   prim->start,
1018                   prim->finish,
1019                   prim->offset,
1020                   prim->numverts);
1021
1022         if (bad_prim_vertex_nr( prim->prim, count )) {
1023                 DRM_ERROR( "bad prim %x count %d\n", 
1024                            prim->prim, count );
1025                 return;
1026         }
1027
1028
1029         if ( start >= prim->finish ||
1030              (prim->start & 0x7) ) {
1031                 DRM_ERROR( "buffer prim %d\n", prim->prim );
1032                 return;
1033         }
1034
1035         dwords = (prim->finish - prim->start + 3) / sizeof(u32);
1036
1037         data = (u32 *)((char *)dev_priv->buffers->handle +
1038                        elt_buf->offset + prim->start);
1039
1040         data[0] = CP_PACKET3( RADEON_3D_RNDR_GEN_INDX_PRIM, dwords-2 );
1041         data[1] = offset;
1042         data[2] = prim->numverts;
1043         data[3] = prim->vc_format;
1044         data[4] = (prim->prim |
1045                    RADEON_PRIM_WALK_IND |
1046                    RADEON_COLOR_ORDER_RGBA |
1047                    RADEON_VTX_FMT_RADEON_MODE |
1048                    (count << RADEON_NUM_VERTICES_SHIFT) );
1049
1050         do {
1051                 if ( i < nbox ) {
1052                         if (DRM_COPY_FROM_USER_UNCHECKED( &box, &boxes[i], sizeof(box) ))
1053                                 return;
1054                         
1055                         radeon_emit_clip_rect( dev_priv, &box );
1056                 }
1057
1058                 radeon_cp_dispatch_indirect( dev, elt_buf,
1059                                              prim->start,
1060                                              prim->finish );
1061
1062                 i++;
1063         } while ( i < nbox );
1064
1065 }
1066
1067 #define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32))
1068
1069 static int radeon_cp_dispatch_texture( DRMFILE filp,
1070                                        drm_device_t *dev,
1071                                        drm_radeon_texture_t *tex,
1072                                        drm_radeon_tex_image_t *image )
1073 {
1074         drm_radeon_private_t *dev_priv = dev->dev_private;
1075         drm_buf_t *buf;
1076         u32 format;
1077         u32 *buffer;
1078         const u8 *data;
1079         int size, dwords, tex_width, blit_width;
1080         u32 height;
1081         int i;
1082         RING_LOCALS;
1083
1084         dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
1085
1086         /* Flush the pixel cache.  This ensures no pixel data gets mixed
1087          * up with the texture data from the host data blit, otherwise
1088          * part of the texture image may be corrupted.
1089          */
1090         BEGIN_RING( 4 );
1091         RADEON_FLUSH_CACHE();
1092         RADEON_WAIT_UNTIL_IDLE();
1093         ADVANCE_RING();
1094
1095 #ifdef __BIG_ENDIAN
1096         /* The Mesa texture functions provide the data in little endian as the
1097          * chip wants it, but we need to compensate for the fact that the CP
1098          * ring gets byte-swapped
1099          */
1100         BEGIN_RING( 2 );
1101         OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
1102         ADVANCE_RING();
1103 #endif
1104
1105
1106         /* The compiler won't optimize away a division by a variable,
1107          * even if the only legal values are powers of two.  Thus, we'll
1108          * use a shift instead.
1109          */
1110         switch ( tex->format ) {
1111         case RADEON_TXFORMAT_ARGB8888:
1112         case RADEON_TXFORMAT_RGBA8888:
1113                 format = RADEON_COLOR_FORMAT_ARGB8888;
1114                 tex_width = tex->width * 4;
1115                 blit_width = image->width * 4;
1116                 break;
1117         case RADEON_TXFORMAT_AI88:
1118         case RADEON_TXFORMAT_ARGB1555:
1119         case RADEON_TXFORMAT_RGB565:
1120         case RADEON_TXFORMAT_ARGB4444:
1121         case RADEON_TXFORMAT_VYUY422:
1122         case RADEON_TXFORMAT_YVYU422:
1123                 format = RADEON_COLOR_FORMAT_RGB565;
1124                 tex_width = tex->width * 2;
1125                 blit_width = image->width * 2;
1126                 break;
1127         case RADEON_TXFORMAT_I8:
1128         case RADEON_TXFORMAT_RGB332:
1129                 format = RADEON_COLOR_FORMAT_CI8;
1130                 tex_width = tex->width * 1;
1131                 blit_width = image->width * 1;
1132                 break;
1133         default:
1134                 DRM_ERROR( "invalid texture format %d\n", tex->format );
1135                 return DRM_ERR(EINVAL);
1136         }
1137
1138         DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width );
1139
1140         do {
1141                 DRM_DEBUG( "tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
1142                            tex->offset >> 10, tex->pitch, tex->format,
1143                            image->x, image->y, image->width, image->height );
1144
1145                 /* Make a copy of some parameters in case we have to
1146                  * update them for a multi-pass texture blit.
1147                  */
1148                 height = image->height;
1149                 data = (const u8 *)image->data;
1150                 
1151                 size = height * blit_width;
1152
1153                 if ( size > RADEON_MAX_TEXTURE_SIZE ) {
1154                         height = RADEON_MAX_TEXTURE_SIZE / blit_width;
1155                         size = height * blit_width;
1156                 } else if ( size < 4 && size > 0 ) {
1157                         size = 4;
1158                 } else if ( size == 0 ) {
1159                         return 0;
1160                 }
1161
1162                 buf = radeon_freelist_get( dev );
1163                 if ( 0 && !buf ) {
1164                         radeon_do_cp_idle( dev_priv );
1165                         buf = radeon_freelist_get( dev );
1166                 }
1167                 if ( !buf ) {
1168                         DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
1169                         DRM_COPY_TO_USER( tex->image, image, sizeof(*image) );
1170                         return DRM_ERR(EAGAIN);
1171                 }
1172
1173
1174                 /* Dispatch the indirect buffer.
1175                  */
1176                 buffer = (u32*)((char*)dev_priv->buffers->handle + buf->offset);
1177                 dwords = size / 4;
1178                 buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
1179                 buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
1180                              RADEON_GMC_BRUSH_NONE |
1181                              (format << 8) |
1182                              RADEON_GMC_SRC_DATATYPE_COLOR |
1183                              RADEON_ROP3_S |
1184                              RADEON_DP_SRC_SOURCE_HOST_DATA |
1185                              RADEON_GMC_CLR_CMP_CNTL_DIS |
1186                              RADEON_GMC_WR_MSK_DIS);
1187                 
1188                 buffer[2] = (tex->pitch << 22) | (tex->offset >> 10);
1189                 buffer[3] = 0xffffffff;
1190                 buffer[4] = 0xffffffff;
1191                 buffer[5] = (image->y << 16) | image->x;
1192                 buffer[6] = (height << 16) | image->width;
1193                 buffer[7] = dwords;
1194                 buffer += 8;
1195
1196                 if ( tex_width >= 32 ) {
1197                         /* Texture image width is larger than the minimum, so we
1198                          * can upload it directly.
1199                          */
1200                         if ( DRM_COPY_FROM_USER( buffer, data, 
1201                                                  dwords * sizeof(u32) ) ) {
1202                                 DRM_ERROR( "EFAULT on data, %d dwords\n", 
1203                                            dwords );
1204                                 return DRM_ERR(EFAULT);
1205                         }
1206                 } else {
1207                         /* Texture image width is less than the minimum, so we
1208                          * need to pad out each image scanline to the minimum
1209                          * width.
1210                          */
1211                         for ( i = 0 ; i < tex->height ; i++ ) {
1212                                 if ( DRM_COPY_FROM_USER( buffer, data, 
1213                                                          tex_width ) ) {
1214                                         DRM_ERROR( "EFAULT on pad, %d bytes\n",
1215                                                    tex_width );
1216                                         return DRM_ERR(EFAULT);
1217                                 }
1218                                 buffer += 8;
1219                                 data += tex_width;
1220                         }
1221                 }
1222
1223                 buf->filp = filp;
1224                 buf->used = (dwords + 8) * sizeof(u32);
1225                 radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
1226                 radeon_cp_discard_buffer( dev, buf );
1227
1228                 /* Update the input parameters for next time */
1229                 image->y += height;
1230                 image->height -= height;
1231                 image->data = (const u8 *) image->data+ size;
1232         } while (image->height > 0);
1233
1234         /* Flush the pixel cache after the blit completes.  This ensures
1235          * the texture data is written out to memory before rendering
1236          * continues.
1237          */
1238         BEGIN_RING( 4 );
1239         RADEON_FLUSH_CACHE();
1240         RADEON_WAIT_UNTIL_2D_IDLE();
1241         ADVANCE_RING();
1242         return 0;
1243 }
1244
1245
1246 static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
1247 {
1248         drm_radeon_private_t *dev_priv = dev->dev_private;
1249         int i;
1250         RING_LOCALS;
1251         DRM_DEBUG( "\n" );
1252
1253         BEGIN_RING( 35 );
1254
1255         OUT_RING( CP_PACKET0( RADEON_RE_STIPPLE_ADDR, 0 ) );
1256         OUT_RING( 0x00000000 );
1257
1258         OUT_RING( CP_PACKET0_TABLE( RADEON_RE_STIPPLE_DATA, 31 ) );
1259         for ( i = 0 ; i < 32 ; i++ ) {
1260                 OUT_RING( stipple[i] );
1261         }
1262
1263         ADVANCE_RING();
1264 }
1265
1266
1267 /* ================================================================
1268  * IOCTL functions
1269  */
1270
1271 int radeon_cp_clear( DRM_IOCTL_ARGS )
1272 {
1273         DRM_DEVICE;
1274         drm_radeon_private_t *dev_priv = dev->dev_private;
1275         drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1276         drm_radeon_clear_t clear;
1277         drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
1278         DRM_DEBUG( "\n" );
1279
1280         LOCK_TEST_WITH_RETURN( dev, filp );
1281
1282         DRM_COPY_FROM_USER_IOCTL( clear, (drm_radeon_clear_t *)data,
1283                              sizeof(clear) );
1284
1285         RING_SPACE_TEST_WITH_RETURN( dev_priv );
1286
1287         if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
1288                 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
1289
1290         if ( DRM_COPY_FROM_USER( &depth_boxes, clear.depth_boxes,
1291                              sarea_priv->nbox * sizeof(depth_boxes[0]) ) )
1292                 return DRM_ERR(EFAULT);
1293
1294         radeon_cp_dispatch_clear( dev, &clear, depth_boxes );
1295
1296         COMMIT_RING();
1297         return 0;
1298 }
1299
1300
1301 /* Not sure why this isn't set all the time:
1302  */ 
1303 static int radeon_do_init_pageflip( drm_device_t *dev )
1304 {
1305         drm_radeon_private_t *dev_priv = dev->dev_private;
1306         RING_LOCALS;
1307
1308         DRM_DEBUG( "\n" );
1309
1310         BEGIN_RING( 6 );
1311         RADEON_WAIT_UNTIL_3D_IDLE();
1312         OUT_RING( CP_PACKET0( RADEON_CRTC_OFFSET_CNTL, 0 ) );
1313         OUT_RING( RADEON_READ( RADEON_CRTC_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
1314         OUT_RING( CP_PACKET0( RADEON_CRTC2_OFFSET_CNTL, 0 ) );
1315         OUT_RING( RADEON_READ( RADEON_CRTC2_OFFSET_CNTL ) | RADEON_CRTC_OFFSET_FLIP_CNTL );
1316         ADVANCE_RING();
1317
1318         dev_priv->page_flipping = 1;
1319         dev_priv->current_page = 0;
1320         dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
1321
1322         return 0;
1323 }
1324
1325 /* Called whenever a client dies, from DRM(release).
1326  * NOTE:  Lock isn't necessarily held when this is called!
1327  */
1328 int radeon_do_cleanup_pageflip( drm_device_t *dev )
1329 {
1330         drm_radeon_private_t *dev_priv = dev->dev_private;
1331         DRM_DEBUG( "\n" );
1332
1333         if (dev_priv->current_page != 0)
1334                 radeon_cp_dispatch_flip( dev );
1335
1336         dev_priv->page_flipping = 0;
1337         return 0;
1338 }
1339
1340 /* Swapping and flipping are different operations and need different ioctls.
1341  * They can and should be intermixed to support multiple 3D windows.
1342  */
1343 int radeon_cp_flip( DRM_IOCTL_ARGS )
1344 {
1345         DRM_DEVICE;
1346         drm_radeon_private_t *dev_priv = dev->dev_private;
1347         DRM_DEBUG( "\n" );
1348
1349         LOCK_TEST_WITH_RETURN( dev, filp );
1350
1351         RING_SPACE_TEST_WITH_RETURN( dev_priv );
1352
1353         if (!dev_priv->page_flipping) 
1354                 radeon_do_init_pageflip( dev );
1355                 
1356         radeon_cp_dispatch_flip( dev );
1357
1358         COMMIT_RING();
1359         return 0;
1360 }
1361
1362 int radeon_cp_swap( DRM_IOCTL_ARGS )
1363 {
1364         DRM_DEVICE;
1365         drm_radeon_private_t *dev_priv = dev->dev_private;
1366         drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1367         DRM_DEBUG( "\n" );
1368
1369         LOCK_TEST_WITH_RETURN( dev, filp );
1370
1371         RING_SPACE_TEST_WITH_RETURN( dev_priv );
1372
1373         if ( sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS )
1374                 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
1375
1376         radeon_cp_dispatch_swap( dev );
1377         dev_priv->sarea_priv->ctx_owner = 0;
1378
1379         COMMIT_RING();
1380         return 0;
1381 }
1382
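/* Dispatch a single non-indexed primitive from one of the DMA buffers owned
 * by the caller, emitting any dirty context/texture state first.
 */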
1383 int radeon_cp_vertex( DRM_IOCTL_ARGS )
1384 {
1385         DRM_DEVICE;
1386         drm_radeon_private_t *dev_priv = dev->dev_private;
1387         drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1388         drm_device_dma_t *dma = dev->dma;
1389         drm_buf_t *buf;
1390         drm_radeon_vertex_t vertex;
1391         drm_radeon_tcl_prim_t prim;
1392
1393         LOCK_TEST_WITH_RETURN( dev, filp );
1394
1395         if ( !dev_priv ) {
1396                 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1397                 return DRM_ERR(EINVAL);
1398         }
1399
1400         DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex_t *)data,
1401                              sizeof(vertex) );
1402
1403         DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
1404                    DRM_CURRENTPID,
1405                    vertex.idx, vertex.count, vertex.discard );
1406
1407         if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
1408                 DRM_ERROR( "buffer index %d (of %d max)\n",
1409                            vertex.idx, dma->buf_count - 1 );
1410                 return DRM_ERR(EINVAL);
1411         }
1412         if ( vertex.prim < 0 ||
1413              vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
1414                 DRM_ERROR( "buffer prim %d\n", vertex.prim );
1415                 return DRM_ERR(EINVAL);
1416         }
1417
1418         RING_SPACE_TEST_WITH_RETURN( dev_priv );
1419         VB_AGE_TEST_WITH_RETURN( dev_priv );
1420
1421         buf = dma->buflist[vertex.idx];
1422
1423         if ( buf->filp != filp ) {
1424                 DRM_ERROR( "process %d using buffer owned by %p\n",
1425                            DRM_CURRENTPID, buf->filp );
1426                 return DRM_ERR(EINVAL);
1427         }
1428         if ( buf->pending ) {
1429                 DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
1430                 return DRM_ERR(EINVAL);
1431         }
1432
1433         /* Build up a prim_t record:
1434          */
1435         if (vertex.count) {
1436                 buf->used = vertex.count; /* not used? */
1437
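                /* Flush any dirty state except the cliprects before the
                 * primitive goes out, then drop the texture-image and
                 * quiescence bits; those are not emitted through the ring
                 * here.
                 */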
1438                 if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
1439                         radeon_emit_state( dev_priv,
1440                                            &sarea_priv->context_state,
1441                                            sarea_priv->tex_state,
1442                                            sarea_priv->dirty );
1443                         
1444                         sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
1445                                                RADEON_UPLOAD_TEX1IMAGES |
1446                                                RADEON_UPLOAD_TEX2IMAGES |
1447                                                RADEON_REQUIRE_QUIESCENCE);
1448                 }
1449
1450                 prim.start = 0;
1451                 prim.finish = vertex.count; /* unused */
1452                 prim.prim = vertex.prim;
1453                 prim.numverts = vertex.count;
1454                 prim.vc_format = dev_priv->sarea_priv->vc_format;
1455                 
1456                 radeon_cp_dispatch_vertex( dev, buf, &prim,
1457                                            dev_priv->sarea_priv->boxes,
1458                                            dev_priv->sarea_priv->nbox );
1459         }
1460
1461         if (vertex.discard) {
1462                 radeon_cp_discard_buffer( dev, buf );
1463         }
1464
1465         COMMIT_RING();
1466         return 0;
1467 }
1468
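/* Dispatch an indexed primitive: the client hands in a DMA buffer that
 * already contains the index data, plus the byte range that covers it.
 */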
1469 int radeon_cp_indices( DRM_IOCTL_ARGS )
1470 {
1471         DRM_DEVICE;
1472         drm_radeon_private_t *dev_priv = dev->dev_private;
1473         drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1474         drm_device_dma_t *dma = dev->dma;
1475         drm_buf_t *buf;
1476         drm_radeon_indices_t elts;
1477         drm_radeon_tcl_prim_t prim;
1478         int count;
1479
1480         LOCK_TEST_WITH_RETURN( dev, filp );
1481
1482         if ( !dev_priv ) {
1483                 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1484                 return DRM_ERR(EINVAL);
1485         }
1486
1487         DRM_COPY_FROM_USER_IOCTL( elts, (drm_radeon_indices_t *)data,
1488                              sizeof(elts) );
1489
1490         DRM_DEBUG( "pid=%d index=%d start=%d end=%d discard=%d\n",
1491                    DRM_CURRENTPID,
1492                    elts.idx, elts.start, elts.end, elts.discard );
1493
1494         if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
1495                 DRM_ERROR( "buffer index %d (of %d max)\n",
1496                            elts.idx, dma->buf_count - 1 );
1497                 return DRM_ERR(EINVAL);
1498         }
1499         if ( elts.prim < 0 ||
1500              elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST ) {
1501                 DRM_ERROR( "buffer prim %d\n", elts.prim );
1502                 return DRM_ERR(EINVAL);
1503         }
1504
1505         RING_SPACE_TEST_WITH_RETURN( dev_priv );
1506         VB_AGE_TEST_WITH_RETURN( dev_priv );
1507
1508         buf = dma->buflist[elts.idx];
1509
1510         if ( buf->filp != filp ) {
1511                 DRM_ERROR( "process %d using buffer owned by %p\n",
1512                            DRM_CURRENTPID, buf->filp );
1513                 return DRM_ERR(EINVAL);
1514         }
1515         if ( buf->pending ) {
1516                 DRM_ERROR( "sending pending buffer %d\n", elts.idx );
1517                 return DRM_ERR(EINVAL);
1518         }
1519
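        /* Rebase the start offset by RADEON_INDEX_PRIM_OFFSET (presumably the
         * room needed for the primitive header that the dispatch code places
         * in front of the index data) and require 8-byte alignment below.
         */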
1520         count = (elts.end - elts.start) / sizeof(u16);
1521         elts.start -= RADEON_INDEX_PRIM_OFFSET;
1522
1523         if ( elts.start & 0x7 ) {
1524                 DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
1525                 return DRM_ERR(EINVAL);
1526         }
1527         if ( elts.start < buf->used ) {
1528                 DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
1529                 return DRM_ERR(EINVAL);
1530         }
1531
1532         buf->used = elts.end;
1533
1534         if ( sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS ) {
1535                 radeon_emit_state( dev_priv,
1536                                    &sarea_priv->context_state,
1537                                    sarea_priv->tex_state,
1538                                    sarea_priv->dirty );
1539
1540                 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
1541                                        RADEON_UPLOAD_TEX1IMAGES |
1542                                        RADEON_UPLOAD_TEX2IMAGES |
1543                                        RADEON_REQUIRE_QUIESCENCE);
1544         }
1545
1546
1547         /* Build up a prim_t record:
1548          */
1549         prim.start = elts.start;
1550         prim.finish = elts.end; 
1551         prim.prim = elts.prim;
1552         prim.offset = 0;        /* offset from start of dma buffers */
1553         prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
1554         prim.vc_format = dev_priv->sarea_priv->vc_format;
1555         
1556         radeon_cp_dispatch_indices( dev, buf, &prim,
1557                                    dev_priv->sarea_priv->boxes,
1558                                    dev_priv->sarea_priv->nbox );
1559         if (elts.discard) {
1560                 radeon_cp_discard_buffer( dev, buf );
1561         }
1562
1563         COMMIT_RING();
1564         return 0;
1565 }
1566
1567 int radeon_cp_texture( DRM_IOCTL_ARGS )
1568 {
1569         DRM_DEVICE;
1570         drm_radeon_private_t *dev_priv = dev->dev_private;
1571         drm_radeon_texture_t tex;
1572         drm_radeon_tex_image_t image;
1573         int ret;
1574
1575         LOCK_TEST_WITH_RETURN( dev, filp );
1576
1577         DRM_COPY_FROM_USER_IOCTL( tex, (drm_radeon_texture_t *)data, sizeof(tex) );
1578
1579         if ( tex.image == NULL ) {
1580                 DRM_ERROR( "null texture image!\n" );
1581                 return DRM_ERR(EINVAL);
1582         }
1583
1584         if ( DRM_COPY_FROM_USER( &image,
1585                              (drm_radeon_tex_image_t *)tex.image,
1586                              sizeof(image) ) )
1587                 return DRM_ERR(EFAULT);
1588
1589         RING_SPACE_TEST_WITH_RETURN( dev_priv );
1590         VB_AGE_TEST_WITH_RETURN( dev_priv );
1591
1592         ret = radeon_cp_dispatch_texture( filp, dev, &tex, &image );
1593
1594         COMMIT_RING();
1595         return ret;
1596 }
1597
1598 int radeon_cp_stipple( DRM_IOCTL_ARGS )
1599 {
1600         DRM_DEVICE;
1601         drm_radeon_private_t *dev_priv = dev->dev_private;
1602         drm_radeon_stipple_t stipple;
1603         u32 mask[32];
1604
1605         LOCK_TEST_WITH_RETURN( dev, filp );
1606
1607         DRM_COPY_FROM_USER_IOCTL( stipple, (drm_radeon_stipple_t *)data,
1608                              sizeof(stipple) );
1609
1610         if ( DRM_COPY_FROM_USER( &mask, stipple.mask, 32 * sizeof(u32) ) )
1611                 return DRM_ERR(EFAULT);
1612
1613         RING_SPACE_TEST_WITH_RETURN( dev_priv );
1614
1615         radeon_cp_dispatch_stipple( dev, mask );
1616
1617         COMMIT_RING();
1618         return 0;
1619 }
1620
1621 int radeon_cp_indirect( DRM_IOCTL_ARGS )
1622 {
1623         DRM_DEVICE;
1624         drm_radeon_private_t *dev_priv = dev->dev_private;
1625         drm_device_dma_t *dma = dev->dma;
1626         drm_buf_t *buf;
1627         drm_radeon_indirect_t indirect;
1628         RING_LOCALS;
1629
1630         LOCK_TEST_WITH_RETURN( dev, filp );
1631
1632         if ( !dev_priv ) {
1633                 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1634                 return DRM_ERR(EINVAL);
1635         }
1636
1637         DRM_COPY_FROM_USER_IOCTL( indirect, (drm_radeon_indirect_t *)data,
1638                              sizeof(indirect) );
1639
1640         DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
1641                    indirect.idx, indirect.start,
1642                    indirect.end, indirect.discard );
1643
1644         if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
1645                 DRM_ERROR( "buffer index %d (of %d max)\n",
1646                            indirect.idx, dma->buf_count - 1 );
1647                 return DRM_ERR(EINVAL);
1648         }
1649
1650         buf = dma->buflist[indirect.idx];
1651
1652         if ( buf->filp != filp ) {
1653                 DRM_ERROR( "process %d using buffer owned by %p\n",
1654                            DRM_CURRENTPID, buf->filp );
1655                 return DRM_ERR(EINVAL);
1656         }
1657         if ( buf->pending ) {
1658                 DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
1659                 return DRM_ERR(EINVAL);
1660         }
1661
1662         if ( indirect.start < buf->used ) {
1663                 DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
1664                            indirect.start, buf->used );
1665                 return DRM_ERR(EINVAL);
1666         }
1667
1668         RING_SPACE_TEST_WITH_RETURN( dev_priv );
1669         VB_AGE_TEST_WITH_RETURN( dev_priv );
1670
1671         buf->used = indirect.end;
1672
1673         /* Wait for the 3D stream to idle before the indirect buffer
1674          * containing 2D acceleration commands is processed.
1675          */
1676         BEGIN_RING( 2 );
1677
1678         RADEON_WAIT_UNTIL_3D_IDLE();
1679
1680         ADVANCE_RING();
1681
1682         /* Dispatch the indirect buffer full of commands from the
1683          * X server.  This is insecure and is thus only available to
1684          * privileged clients.
1685          */
1686         radeon_cp_dispatch_indirect( dev, buf, indirect.start, indirect.end );
1687         if (indirect.discard) {
1688                 radeon_cp_discard_buffer( dev, buf );
1689         }
1690
1691
1692         COMMIT_RING();
1693         return 0;
1694 }
1695
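/* Newer vertex path: one DMA buffer can carry several primitives, each
 * referencing a state block by index; state is only re-emitted when the
 * index changes from the previous primitive.
 */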
1696 int radeon_cp_vertex2( DRM_IOCTL_ARGS )
1697 {
1698         DRM_DEVICE;
1699         drm_radeon_private_t *dev_priv = dev->dev_private;
1700         drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
1701         drm_device_dma_t *dma = dev->dma;
1702         drm_buf_t *buf;
1703         drm_radeon_vertex2_t vertex;
1704         int i;
1705         unsigned char laststate;
1706
1707         LOCK_TEST_WITH_RETURN( dev, filp );
1708
1709         if ( !dev_priv ) {
1710                 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1711                 return DRM_ERR(EINVAL);
1712         }
1713
1714         DRM_COPY_FROM_USER_IOCTL( vertex, (drm_radeon_vertex2_t *)data,
1715                              sizeof(vertex) );
1716
1717         DRM_DEBUG( "pid=%d index=%d discard=%d\n",
1718                    DRM_CURRENTPID,
1719                    vertex.idx, vertex.discard );
1720
1721         if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
1722                 DRM_ERROR( "buffer index %d (of %d max)\n",
1723                            vertex.idx, dma->buf_count - 1 );
1724                 return DRM_ERR(EINVAL);
1725         }
1726
1727         RING_SPACE_TEST_WITH_RETURN( dev_priv );
1728         VB_AGE_TEST_WITH_RETURN( dev_priv );
1729
1730         buf = dma->buflist[vertex.idx];
1731
1732         if ( buf->filp != filp ) {
1733                 DRM_ERROR( "process %d using buffer owned by %p\n",
1734                            DRM_CURRENTPID, buf->filp );
1735                 return DRM_ERR(EINVAL);
1736         }
1737
1738         if ( buf->pending ) {
1739                 DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
1740                 return DRM_ERR(EINVAL);
1741         }
1742         
1743         if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
1744                 return DRM_ERR(EINVAL);
1745
1746         for (laststate = 0xff, i = 0 ; i < vertex.nr_prims ; i++) {
1747                 drm_radeon_prim_t prim;
1748                 drm_radeon_tcl_prim_t tclprim;
1749                 
1750                 if ( DRM_COPY_FROM_USER( &prim, &vertex.prim[i], sizeof(prim) ) )
1751                         return DRM_ERR(EFAULT);
1752                 
1753                 if ( prim.stateidx != laststate ) {
1754                         drm_radeon_state_t state;                              
1755                                 
1756                         if ( DRM_COPY_FROM_USER( &state, 
1757                                              &vertex.state[prim.stateidx], 
1758                                              sizeof(state) ) )
1759                                 return DRM_ERR(EFAULT);
1760
1761                         radeon_emit_state2( dev_priv, &state );
1762
1763                         laststate = prim.stateidx;
1764                 }
1765
1766                 tclprim.start = prim.start;
1767                 tclprim.finish = prim.finish;
1768                 tclprim.prim = prim.prim;
1769                 tclprim.vc_format = prim.vc_format;
1770
1771                 if ( prim.prim & RADEON_PRIM_WALK_IND ) {
1772                         tclprim.offset = prim.numverts * 64;
1773                         tclprim.numverts = RADEON_MAX_VB_VERTS; /* duh */
1774
1775                         radeon_cp_dispatch_indices( dev, buf, &tclprim,
1776                                                     sarea_priv->boxes,
1777                                                     sarea_priv->nbox);
1778                 } else {
1779                         tclprim.numverts = prim.numverts;
1780                         tclprim.offset = 0; /* not used */
1781
1782                         radeon_cp_dispatch_vertex( dev, buf, &tclprim,
1783                                                    sarea_priv->boxes,
1784                                                    sarea_priv->nbox);
1785                 }
1786                 
1787                 if (sarea_priv->nbox == 1)
1788                         sarea_priv->nbox = 0;
1789         }
1790
1791         if ( vertex.discard ) {
1792                 radeon_cp_discard_buffer( dev, buf );
1793         }
1794
1795         COMMIT_RING();
1796         return 0;
1797 }
1798
1799
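/* Emit a RADEON_CMD_PACKET chunk: look the packet id up in the packet[]
 * register table and copy that many dwords from the user command buffer
 * into a single type-0 (register write) CP packet.
 */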
1800 static int radeon_emit_packets( 
1801         drm_radeon_private_t *dev_priv,
1802         drm_radeon_cmd_header_t header,
1803         drm_radeon_cmd_buffer_t *cmdbuf )
1804 {
1805         int id = (int)header.packet.packet_id;
1806         int sz, reg;
1807         int *data = (int *)cmdbuf->buf;
1808         RING_LOCALS;
1809    
1810         if (id >= RADEON_MAX_STATE_PACKETS)
1811                 return DRM_ERR(EINVAL);
1812
1813         sz = packet[id].len;
1814         reg = packet[id].start;
1815
1816         if (sz * sizeof(int) > cmdbuf->bufsz) 
1817                 return DRM_ERR(EINVAL);
1818
1819         BEGIN_RING(sz+1);
1820         OUT_RING( CP_PACKET0( reg, (sz-1) ) );
1821         OUT_RING_USER_TABLE( data, sz );
1822         ADVANCE_RING();
1823
1824         cmdbuf->buf += sz * sizeof(int);
1825         cmdbuf->bufsz -= sz * sizeof(int);
1826         return 0;
1827 }
1828
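/* Emit a block of TCL scalar state: program the scalar index register with
 * the start offset and dword stride, then stream the user-supplied values
 * through the scalar data register.
 */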
1829 static __inline__ int radeon_emit_scalars( 
1830         drm_radeon_private_t *dev_priv,
1831         drm_radeon_cmd_header_t header,
1832         drm_radeon_cmd_buffer_t *cmdbuf )
1833 {
1834         int sz = header.scalars.count;
1835         int *data = (int *)cmdbuf->buf;
1836         int start = header.scalars.offset;
1837         int stride = header.scalars.stride;
1838         RING_LOCALS;
1839
1840         BEGIN_RING( 3+sz );
1841         OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
1842         OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
1843         OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
1844         OUT_RING_USER_TABLE( data, sz );
1845         ADVANCE_RING();
1846         cmdbuf->buf += sz * sizeof(int);
1847         cmdbuf->bufsz -= sz * sizeof(int);
1848         return 0;
1849 }
1850
1851 /* God this is ugly
1852  */
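/* Same as radeon_emit_scalars, except that the offset taken from the header
 * is biased by 0x100.
 */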
1853 static __inline__ int radeon_emit_scalars2( 
1854         drm_radeon_private_t *dev_priv,
1855         drm_radeon_cmd_header_t header,
1856         drm_radeon_cmd_buffer_t *cmdbuf )
1857 {
1858         int sz = header.scalars.count;
1859         int *data = (int *)cmdbuf->buf;
1860         int start = ((unsigned int)header.scalars.offset) + 0x100;
1861         int stride = header.scalars.stride;
1862         RING_LOCALS;
1863
1864         BEGIN_RING( 3+sz );
1865         OUT_RING( CP_PACKET0( RADEON_SE_TCL_SCALAR_INDX_REG, 0 ) );
1866         OUT_RING( start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
1867         OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_SCALAR_DATA_REG, sz-1 ) );
1868         OUT_RING_USER_TABLE( data, sz );
1869         ADVANCE_RING();
1870         cmdbuf->buf += sz * sizeof(int);
1871         cmdbuf->bufsz -= sz * sizeof(int);
1872         return 0;
1873 }
1874
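/* Emit a block of TCL vector state; like the scalar case, but with an
 * octword stride through the vector index/data registers.
 */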
1875 static __inline__ int radeon_emit_vectors( 
1876         drm_radeon_private_t *dev_priv,
1877         drm_radeon_cmd_header_t header,
1878         drm_radeon_cmd_buffer_t *cmdbuf )
1879 {
1880         int sz = header.vectors.count;
1881         int *data = (int *)cmdbuf->buf;
1882         int start = header.vectors.offset;
1883         int stride = header.vectors.stride;
1884         RING_LOCALS;
1885
1886         BEGIN_RING( 3+sz );
1887         OUT_RING( CP_PACKET0( RADEON_SE_TCL_VECTOR_INDX_REG, 0 ) );
1888         OUT_RING( start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
1889         OUT_RING( CP_PACKET0_TABLE( RADEON_SE_TCL_VECTOR_DATA_REG, (sz-1) ) );
1890         OUT_RING_USER_TABLE( data, sz );
1891         ADVANCE_RING();
1892
1893         cmdbuf->buf += sz * sizeof(int);
1894         cmdbuf->bufsz -= sz * sizeof(int);
1895         return 0;
1896 }
1897
1898
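/* Emit a raw type-3 CP packet from the user command buffer: read the header
 * dword, check that it really is a PACKET3 and that the length implied by
 * its count field fits in the buffer, then copy the packet to the ring
 * verbatim.
 */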
1899 static int radeon_emit_packet3( drm_device_t *dev,
1900                                 drm_radeon_cmd_buffer_t *cmdbuf )
1901 {
1902         drm_radeon_private_t *dev_priv = dev->dev_private;
1903         int cmdsz, tmp;
1904         int *cmd = (int *)cmdbuf->buf;
1905         RING_LOCALS;
1906
1907
1908         DRM_DEBUG("\n");
1909
1910         if (DRM_GET_USER_UNCHECKED( tmp, &cmd[0]))
1911                 return DRM_ERR(EFAULT);
1912
1913         cmdsz = 2 + ((tmp & RADEON_CP_PACKET_COUNT_MASK) >> 16);
1914
1915         if ((tmp & 0xc0000000) != RADEON_CP_PACKET3 ||
1916             cmdsz * 4 > cmdbuf->bufsz)
1917                 return DRM_ERR(EINVAL);
1918
1919         BEGIN_RING( cmdsz );
1920         OUT_RING_USER_TABLE( cmd, cmdsz );
1921         ADVANCE_RING();
1922
1923         cmdbuf->buf += cmdsz * 4;
1924         cmdbuf->bufsz -= cmdsz * 4;
1925         return 0;
1926 }
1927
1928
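/* Same as radeon_emit_packet3, but the packet is re-emitted once per
 * cliprect, with the hardware scissor reprogrammed via
 * radeon_emit_clip_rect() before each copy.
 */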
1929 static int radeon_emit_packet3_cliprect( drm_device_t *dev,
1930                                          drm_radeon_cmd_buffer_t *cmdbuf,
1931                                          int orig_nbox )
1932 {
1933         drm_radeon_private_t *dev_priv = dev->dev_private;
1934         drm_clip_rect_t box;
1935         int cmdsz, tmp;
1936         int *cmd = (int *)cmdbuf->buf;
1937         drm_clip_rect_t *boxes = cmdbuf->boxes;
1938         int i = 0;
1939         RING_LOCALS;
1940
1941         DRM_DEBUG("\n");
1942
1943         if (DRM_GET_USER_UNCHECKED( tmp, &cmd[0]))
1944                 return DRM_ERR(EFAULT);
1945
1946         cmdsz = 2 + ((tmp & RADEON_CP_PACKET_COUNT_MASK) >> 16);
1947
1948         if ((tmp & 0xc0000000) != RADEON_CP_PACKET3 ||
1949             cmdsz * 4 > cmdbuf->bufsz)
1950                 return DRM_ERR(EINVAL);
1951
1952         if (!orig_nbox)
1953                 goto out;
1954
1955         do {
1956                 if ( i < cmdbuf->nbox ) {
1957                         if (DRM_COPY_FROM_USER_UNCHECKED( &box, &boxes[i], sizeof(box) ))
1958                                 return DRM_ERR(EFAULT);
1959                         /* FIXME The second and subsequent times round
1960                          * this loop, send a WAIT_UNTIL_3D_IDLE before
1961                          * calling emit_clip_rect(). This fixes a
1962                          * lockup on fast machines when sending
1963                          * several cliprects with a cmdbuf, as when
1964                          * waving a 2D window over a 3D
1965                          * window. Something in the commands from user
1966                          * space seems to hang the card when they're
1967                          * sent several times in a row. That would be
1968                          * the correct place to fix it but this works
1969                          * around it until I can figure that out - Tim
1970                          * Smith */
1971                         if ( i ) {
1972                                 BEGIN_RING( 2 );
1973                                 RADEON_WAIT_UNTIL_3D_IDLE();
1974                                 ADVANCE_RING();
1975                         }
1976                         radeon_emit_clip_rect( dev_priv, &box );
1977                 }
1978                 
1979                 BEGIN_RING( cmdsz );
1980                 OUT_RING_USER_TABLE( cmd, cmdsz );
1981                 ADVANCE_RING();
1982
1983         } while ( ++i < cmdbuf->nbox );
1984         if (cmdbuf->nbox == 1)
1985                 cmdbuf->nbox = 0;
1986
1987  out:
1988         cmdbuf->buf += cmdsz * 4;
1989         cmdbuf->bufsz -= cmdsz * 4;
1990         return 0;
1991 }
1992
1993
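/* Translate a RADEON_CMD_WAIT flag combination into the matching WAIT_UNTIL
 * idle packet.
 */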
1994 static int radeon_emit_wait( drm_device_t *dev, int flags )
1995 {
1996         drm_radeon_private_t *dev_priv = dev->dev_private;
1997         RING_LOCALS;
1998
1999         DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
2000         switch (flags) {
2001         case RADEON_WAIT_2D:
2002                 BEGIN_RING( 2 );
2003                 RADEON_WAIT_UNTIL_2D_IDLE(); 
2004                 ADVANCE_RING();
2005                 break;
2006         case RADEON_WAIT_3D:
2007                 BEGIN_RING( 2 );
2008                 RADEON_WAIT_UNTIL_3D_IDLE(); 
2009                 ADVANCE_RING();
2010                 break;
2011         case RADEON_WAIT_2D|RADEON_WAIT_3D:
2012                 BEGIN_RING( 2 );
2013                 RADEON_WAIT_UNTIL_IDLE(); 
2014                 ADVANCE_RING();
2015                 break;
2016         default:
2017                 return DRM_ERR(EINVAL);
2018         }
2019
2020         return 0;
2021 }
2022
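/* Main command-buffer ioctl: after verifying that the user buffer and the
 * cliprect array are readable, walk the stream one drm_radeon_cmd_header_t
 * at a time and hand each chunk to the emit helper for its cmd_type.
 */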
2023 int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
2024 {
2025         DRM_DEVICE;
2026         drm_radeon_private_t *dev_priv = dev->dev_private;
2027         drm_device_dma_t *dma = dev->dma;
2028         drm_buf_t *buf = NULL;
2029         int idx;
2030         drm_radeon_cmd_buffer_t cmdbuf;
2031         drm_radeon_cmd_header_t header;
2032         int orig_nbox;
2033
2034         LOCK_TEST_WITH_RETURN( dev, filp );
2035
2036         if ( !dev_priv ) {
2037                 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
2038                 return DRM_ERR(EINVAL);
2039         }
2040
2041         DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t *)data,
2042                              sizeof(cmdbuf) );
2043
2044         RING_SPACE_TEST_WITH_RETURN( dev_priv );
2045         VB_AGE_TEST_WITH_RETURN( dev_priv );
2046
2047
2048         if (DRM_VERIFYAREA_READ( cmdbuf.buf, cmdbuf.bufsz ))
2049                 return DRM_ERR(EFAULT);
2050
2051         if (cmdbuf.nbox &&
2052             DRM_VERIFYAREA_READ(cmdbuf.boxes, 
2053                          cmdbuf.nbox * sizeof(drm_clip_rect_t)))
2054                 return DRM_ERR(EFAULT);
2055
2056         orig_nbox = cmdbuf.nbox;
2057
2058         while ( cmdbuf.bufsz >= sizeof(header) ) {
2059                 
2060                 if (DRM_GET_USER_UNCHECKED( header.i, (int *)cmdbuf.buf )) {
2061                         DRM_ERROR("__get_user %p\n", cmdbuf.buf);
2062                         return DRM_ERR(EFAULT);
2063                 }
2064
2065                 cmdbuf.buf += sizeof(header);
2066                 cmdbuf.bufsz -= sizeof(header);
2067
2068                 switch (header.header.cmd_type) {
2069                 case RADEON_CMD_PACKET: 
2070                         DRM_DEBUG("RADEON_CMD_PACKET\n");
2071                         if (radeon_emit_packets( dev_priv, header, &cmdbuf )) {
2072                                 DRM_ERROR("radeon_emit_packets failed\n");
2073                                 return DRM_ERR(EINVAL);
2074                         }
2075                         break;
2076
2077                 case RADEON_CMD_SCALARS:
2078                         DRM_DEBUG("RADEON_CMD_SCALARS\n");
2079                         if (radeon_emit_scalars( dev_priv, header, &cmdbuf )) {
2080                                 DRM_ERROR("radeon_emit_scalars failed\n");
2081                                 return DRM_ERR(EINVAL);
2082                         }
2083                         break;
2084
2085                 case RADEON_CMD_VECTORS:
2086                         DRM_DEBUG("RADEON_CMD_VECTORS\n");
2087                         if (radeon_emit_vectors( dev_priv, header, &cmdbuf )) {
2088                                 DRM_ERROR("radeon_emit_vectors failed\n");
2089                                 return DRM_ERR(EINVAL);
2090                         }
2091                         break;
2092
2093                 case RADEON_CMD_DMA_DISCARD:
2094                         DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
2095                         idx = header.dma.buf_idx;
2096                         if ( idx < 0 || idx >= dma->buf_count ) {
2097                                 DRM_ERROR( "buffer index %d (of %d max)\n",
2098                                            idx, dma->buf_count - 1 );
2099                                 return DRM_ERR(EINVAL);
2100                         }
2101
2102                         buf = dma->buflist[idx];
2103                         if ( buf->filp != filp || buf->pending ) {
2104                                 DRM_ERROR( "bad buffer %p %p %d\n",
2105                                            buf->filp, filp, buf->pending);
2106                                 return DRM_ERR(EINVAL);
2107                         }
2108
2109                         radeon_cp_discard_buffer( dev, buf );
2110                         break;
2111
2112                 case RADEON_CMD_PACKET3:
2113                         DRM_DEBUG("RADEON_CMD_PACKET3\n");
2114                         if (radeon_emit_packet3( dev, &cmdbuf )) {
2115                                 DRM_ERROR("radeon_emit_packet3 failed\n");
2116                                 return DRM_ERR(EINVAL);
2117                         }
2118                         break;
2119
2120                 case RADEON_CMD_PACKET3_CLIP:
2121                         DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
2122                         if (radeon_emit_packet3_cliprect( dev, &cmdbuf, orig_nbox )) {
2123                                 DRM_ERROR("radeon_emit_packet3_clip failed\n");
2124                                 return DRM_ERR(EINVAL);
2125                         }
2126                         break;
2127
2128                 case RADEON_CMD_SCALARS2:
2129                         DRM_DEBUG("RADEON_CMD_SCALARS2\n");
2130                         if (radeon_emit_scalars2( dev_priv, header, &cmdbuf )) {
2131                                 DRM_ERROR("radeon_emit_scalars2 failed\n");
2132                                 return DRM_ERR(EINVAL);
2133                         }
2134                         break;
2135
2136                 case RADEON_CMD_WAIT:
2137                         DRM_DEBUG("RADEON_CMD_WAIT\n");
2138                         if (radeon_emit_wait( dev, header.wait.flags )) {
2139                                 DRM_ERROR("radeon_emit_wait failed\n");
2140                                 return DRM_ERR(EINVAL);
2141                         }
2142                         break;
2143                 default:
2144                         DRM_ERROR("bad cmd_type %d at %p\n", 
2145                                   header.header.cmd_type,
2146                                   cmdbuf.buf - sizeof(header));
2147                         return DRM_ERR(EINVAL);
2148                 }
2149         }
2150
2151
2152         DRM_DEBUG("DONE\n");
2153         COMMIT_RING();
2154         return 0;
2155 }
2156
2157
2158
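/* Return a single driver parameter (scratch register contents, AGP offsets,
 * IRQ number, ...) to user space.
 */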
2159 int radeon_cp_getparam( DRM_IOCTL_ARGS )
2160 {
2161         DRM_DEVICE;
2162         drm_radeon_private_t *dev_priv = dev->dev_private;
2163         drm_radeon_getparam_t param;
2164         int value;
2165
2166         if ( !dev_priv ) {
2167                 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
2168                 return DRM_ERR(EINVAL);
2169         }
2170
2171         DRM_COPY_FROM_USER_IOCTL( param, (drm_radeon_getparam_t *)data,
2172                              sizeof(param) );
2173
2174         DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
2175
2176         switch( param.param ) {
2177         case RADEON_PARAM_AGP_BUFFER_OFFSET:
2178                 value = dev_priv->agp_buffers_offset;
2179                 break;
2180         case RADEON_PARAM_LAST_FRAME:
2181                 dev_priv->stats.last_frame_reads++;
2182                 value = GET_SCRATCH( 0 );
2183                 break;
2184         case RADEON_PARAM_LAST_DISPATCH:
2185                 value = GET_SCRATCH( 1 );
2186                 break;
2187         case RADEON_PARAM_LAST_CLEAR:
2188                 dev_priv->stats.last_clear_reads++;
2189                 value = GET_SCRATCH( 2 );
2190                 break;
2191         case RADEON_PARAM_IRQ_NR:
2192                 value = dev->irq;
2193                 break;
2194         case RADEON_PARAM_AGP_BASE:
2195                 value = dev_priv->agp_vm_start;
2196                 break;
2197         case RADEON_PARAM_REGISTER_HANDLE:
2198                 value = dev_priv->mmio_offset;
2199                 break;
2200         case RADEON_PARAM_STATUS_HANDLE:
2201                 value = dev_priv->ring_rptr_offset;
2202                 break;
2203         case RADEON_PARAM_SAREA_HANDLE:
2204                 /* The lock is the first dword in the sarea. */
2205                 value = (int)dev->lock.hw_lock; 
2206                 break;  
2207         case RADEON_PARAM_AGP_TEX_HANDLE:
2208                 value = dev_priv->agp_textures_offset;
2209                 break;
2210         default:
2211                 return DRM_ERR(EINVAL);
2212         }
2213
2214         if ( DRM_COPY_TO_USER( param.value, &value, sizeof(int) ) ) {
2215                 DRM_ERROR( "copy_to_user\n" );
2216                 return DRM_ERR(EFAULT);
2217         }
2218         
2219         return 0;
2220 }