#ifndef COMMON_CONTEXT_H
#define COMMON_CONTEXT_H

#include "math/m_vector.h"
#include "tnl/t_context.h"
#include "main/colormac.h"

#include "radeon_screen.h"
#include "radeon_drm.h"
#include "dri_util.h"
#include "tnl/t_vertex.h"
#include "swrast/s_context.h"


#include "radeon_cs_gem.h"

/* This union is used to avoid warnings/miscompilation
   with float to uint32_t casts due to strict-aliasing */
typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;
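/*
 * Illustrative sketch (not part of the original header): float_ui32_type lets
 * state code reinterpret a float's bit pattern as a register dword without the
 * strict-aliasing problems of a pointer cast. The helper and the cmd[] layout
 * below are hypothetical.
 */
#if 0
static void example_store_float_reg(GLuint *cmd, GLfloat value)
{
	float_ui32_type tmp;

	tmp.f = value;		/* write through the float member...        */
	cmd[0] = tmp.ui32;	/* ...then read the raw bits back as uint32 */
}
#endif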

typedef struct radeon_context radeonContextRec;
typedef struct radeon_context *radeonContextPtr;

#define TEX_0	0x1
#define TEX_1	0x2
#define TEX_2	0x4
#define TEX_3	0x8
#define TEX_4	0x10
#define TEX_5	0x20

/* See corresponding strings in r200_swtcl.c */
#define RADEON_FALLBACK_TEXTURE		0x0001
#define RADEON_FALLBACK_DRAW_BUFFER	0x0002
#define RADEON_FALLBACK_STENCIL		0x0004
#define RADEON_FALLBACK_RENDER_MODE	0x0008
#define RADEON_FALLBACK_BLEND_EQ	0x0010
#define RADEON_FALLBACK_BLEND_FUNC	0x0020
#define RADEON_FALLBACK_DISABLE		0x0040
#define RADEON_FALLBACK_BORDER_MODE	0x0080
#define RADEON_FALLBACK_DEPTH_BUFFER	0x0100
#define RADEON_FALLBACK_STENCIL_BUFFER	0x0200

#define R200_FALLBACK_TEXTURE           0x01
#define R200_FALLBACK_DRAW_BUFFER       0x02
#define R200_FALLBACK_STENCIL           0x04
#define R200_FALLBACK_RENDER_MODE       0x08
#define R200_FALLBACK_DISABLE           0x10
#define R200_FALLBACK_BORDER_MODE       0x20

#define RADEON_TCL_FALLBACK_RASTER            0x1 /* rasterization */
#define RADEON_TCL_FALLBACK_UNFILLED          0x2 /* unfilled tris */
#define RADEON_TCL_FALLBACK_LIGHT_TWOSIDE     0x4 /* twoside tris */
#define RADEON_TCL_FALLBACK_MATERIAL          0x8 /* material in vb */
#define RADEON_TCL_FALLBACK_TEXGEN_0          0x10 /* texgen, unit 0 */
#define RADEON_TCL_FALLBACK_TEXGEN_1          0x20 /* texgen, unit 1 */
#define RADEON_TCL_FALLBACK_TEXGEN_2          0x40 /* texgen, unit 2 */
#define RADEON_TCL_FALLBACK_TCL_DISABLE       0x80 /* user disable */
#define RADEON_TCL_FALLBACK_FOGCOORDSPEC      0x100 /* fogcoord, sep. spec light */
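/*
 * Illustrative sketch (not part of the original header): the fallback values
 * above are bitmask flags, so several fallback reasons can be active at once.
 * The helper below is hypothetical; it manipulates the TclFallback bitmask
 * kept in struct radeon_context further down in this header.
 */
#if 0
static void example_update_tcl_fallback(radeonContextPtr rmesa, GLuint bit, GLboolean mode)
{
	if (mode)
		rmesa->TclFallback |= bit;	/* e.g. RADEON_TCL_FALLBACK_MATERIAL */
	else
		rmesa->TclFallback &= ~bit;
}
#endif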
 
/* The blit width for texture uploads
 */
#define BLIT_WIDTH_BYTES 1024

/* Use the templated vertex format:
 */
#define COLOR_IS_RGBA
#define TAG(x) radeon##x
#include "tnl_dd/t_dd_vertex.h"
#undef TAG


struct radeon_renderbuffer
{
	struct swrast_renderbuffer base;

	struct radeon_bo *bo;
	unsigned int cpp;
	/* unsigned int offset; */
	unsigned int pitch;

	GLbitfield map_mode;
	int map_x, map_y, map_w, map_h;
	int map_pitch;
	void *map_buffer;

	/* boo Xorg 6.8.2 compat */
	int has_surface;

	__DRIdrawable *dPriv;
};
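/*
 * Illustrative sketch (not part of the original header): addressing a pixel in
 * a mapped renderbuffer from the map_* bookkeeping above, assuming map_buffer
 * points at the first pixel of the mapped rectangle. The helper name is
 * hypothetical.
 */
#if 0
static void *example_map_pixel(struct radeon_renderbuffer *rrb, int x, int y)
{
	/* x and y are window coordinates inside the mapped rectangle */
	return (char *)rrb->map_buffer
	       + (y - rrb->map_y) * rrb->map_pitch
	       + (x - rrb->map_x) * rrb->cpp;
}
#endif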
struct radeon_framebuffer
{
	struct gl_framebuffer base;

	struct radeon_renderbuffer *color_rb[2];
};

struct radeon_colorbuffer_state {
	int roundEnable;
	struct gl_renderbuffer *rb;
	uint32_t draw_offset; /* offset into color renderbuffer - FBOs */
};

struct radeon_depthbuffer_state {
	struct gl_renderbuffer *rb;
};

struct radeon_scissor_state {
	drm_clip_rect_t rect;
	GLboolean enabled;
};

struct radeon_state_atom {
	struct radeon_state_atom *next, *prev;
	const char *name;	/* for debug */
	int cmd_size;		/* size in bytes */
	GLuint idx;
	GLuint is_tcl;
	GLuint *cmd;		/* one or more cmd's */
	GLuint *lastcmd;	/* one or more cmd's */
	GLboolean dirty;	/* dirty-mark in emit_state_list */
	int (*check) (struct gl_context *, struct radeon_state_atom *atom); /* is this state active? */
	void (*emit) (struct gl_context *, struct radeon_state_atom *atom);
};
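/*
 * Illustrative sketch (not part of the original header): how driver code
 * typically fills in a state atom before linking it into
 * radeon_hw_state.atomlist. The example_* names and the fixed-size command
 * array are hypothetical; real atoms also set cmd_size, idx and is_tcl.
 */
#if 0
static GLuint example_cmd[4];	/* backing storage for the atom's command dwords */

static int example_check_always(struct gl_context *ctx, struct radeon_state_atom *atom)
{
	return 1;	/* state considered active on every emit */
}

static void example_init_atom(struct radeon_state_atom *atom)
{
	atom->name = "EXAMPLE";		/* shows up in debug output */
	atom->cmd = example_cmd;
	atom->lastcmd = NULL;
	atom->check = example_check_always;
	atom->dirty = GL_TRUE;		/* force the first emit */
}
#endif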

struct radeon_hw_state {
	/* Head of the linked list of state atoms. */
	struct radeon_state_atom atomlist;
	int max_state_size;	/* Number of bytes necessary for a full state emit. */
	int max_post_flush_size; /* Number of bytes necessary for post flushing emits */
	GLboolean is_dirty, all_dirty;
};


typedef struct _radeon_texture_image radeon_texture_image;

/**
 * This is a subclass of swrast_texture_image since we use swrast
 * for software fallback rendering.
 */
struct _radeon_texture_image {
	struct swrast_texture_image base;

	/**
	 * If mt != 0, the image is stored in hardware format in the
	 * given mipmap tree. In this case, base.Data may point into the
	 * mapping of the buffer object that contains the mipmap tree.
	 *
	 * If mt == 0, the image is stored in normal memory pointed to
	 * by base.Data.
	 */
	struct _radeon_mipmap_tree *mt;
	struct radeon_bo *bo;
	GLboolean used_as_render_target;
};


static inline radeon_texture_image *get_radeon_texture_image(struct gl_texture_image *image)
{
	return (radeon_texture_image*)image;
}

typedef struct radeon_tex_obj radeonTexObj, *radeonTexObjPtr;

/* Texture object in locally shared texture space.
 */
struct radeon_tex_obj {
	struct gl_texture_object base;
	struct _radeon_mipmap_tree *mt;

	/**
	 * This is true if we've verified that the mipmap tree above is complete
	 * and so on.
	 */
	GLboolean validated;
	/* Minimum LOD to be used during rendering */
	unsigned minLod;
	/* Maximum LOD to be used during rendering */
	unsigned maxLod;

	GLboolean image_override; /* Image overridden by GLX_EXT_tfp */
	GLuint tile_bits;	/* hw texture tile bits used on this texture */
	struct radeon_bo *bo;

	GLuint pp_txformat;
	GLuint pp_txformat_x;
	GLuint pp_txsize;	/* npot only */
	GLuint pp_txpitch;	/* npot only */
	GLuint pp_border_color;
	GLuint pp_cubic_faces;	/* cube face 1,2,3,4 log2 sizes */

};

static inline radeonTexObj* radeon_tex_obj(struct gl_texture_object *texObj)
{
	return (radeonTexObj*)texObj;
}
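/*
 * Illustrative sketch (not part of the original header): driver code downcasts
 * the gl_texture_object it gets from core Mesa with radeon_tex_obj() before
 * touching driver-private fields. The helper below is a hypothetical fragment.
 */
#if 0
static GLboolean example_texture_needs_validation(struct gl_texture_object *texObj)
{
	radeonTexObj *t = radeon_tex_obj(texObj);

	/* revalidate if no mipmap tree was built or it was never verified */
	return t->mt == NULL || !t->validated;
}
#endif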

struct radeon_query_object {
	struct gl_query_object Base;
	struct radeon_bo *bo;
	int curr_offset;
	GLboolean emitted_begin;

	struct radeon_query_object *prev, *next;
};

/* Need refcounting on dma buffers:
 */
struct radeon_dma_buffer {
	int refcount;		/* the number of retained regions in buf */
	drmBufPtr buf;
};

struct radeon_aos {
	struct radeon_bo *bo; /** Buffer object where vertex data is stored */
	int offset; /** Offset into buffer object, in bytes */
	int components; /** Number of components per vertex */
	int stride; /** Stride in dwords (may be 0 for repeating) */
	int count; /** Number of vertices */
};
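/*
 * Illustrative sketch (not part of the original header): filling one AOS slot
 * to describe a stream of 4-component float positions. The buffer object and
 * the numbers are hypothetical.
 */
#if 0
static void example_setup_position_aos(struct radeon_aos *aos,
                                       struct radeon_bo *position_bo,
                                       int nr_verts)
{
	aos->bo = position_bo;	/* vertex data lives in this BO     */
	aos->offset = 0;	/* begins at the start of the BO    */
	aos->components = 4;	/* x, y, z, w                       */
	aos->stride = 4;	/* in dwords; 0 would mean "repeat" */
	aos->count = nr_verts;
}
#endif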


struct radeon_dma_bo {
  struct radeon_dma_bo *next, *prev;
  struct radeon_bo *bo;
  int expire_counter;
};

struct radeon_dma {
        /* Active dma region.  Allocations for vertices and retained
         * regions come from here.  Also used for emitting random vertices,
         * these may be flushed by calling flush_current();
         */
	struct radeon_dma_bo free;
	struct radeon_dma_bo wait;
	struct radeon_dma_bo reserved;
        size_t current_used; /** Number of bytes allocated and forgotten about */
        size_t current_vertexptr; /** End of active vertex region */
        size_t minimum_size;

        /**
         * If current_vertexptr != current_used then flush must be non-zero.
         * flush must be called before non-active vertex allocations can be
         * performed.
         */
        void (*flush) (struct gl_context *);
};
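/*
 * Illustrative sketch (not part of the original header): the flush contract
 * described above. If vertices are pending in the active region, the flush
 * hook must run before other allocations reuse it; the helper name is
 * hypothetical.
 */
#if 0
static void example_finish_pending_vertices(struct gl_context *ctx, struct radeon_dma *dma)
{
	if (dma->flush)			/* set while vertices are outstanding          */
		dma->flush(ctx);	/* after this, non-vertex allocations are safe */
}
#endif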

/* radeon_swtcl_info
 */
struct radeon_swtcl_info {

	GLuint vertex_size;
	GLubyte *verts;

	/* Fallback rasterization functions
	 */
	GLuint hw_primitive;
	GLenum render_primitive;
	GLuint numverts;

	GLuint vertex_attr_count;

	struct radeon_bo *bo;
};

#define RADEON_MAX_AOS_ARRAYS		16
struct radeon_tcl_info {
	struct radeon_aos aos[RADEON_MAX_AOS_ARRAYS];
	GLuint aos_count;
	struct radeon_bo *elt_dma_bo; /** Buffer object that contains element indices */
	int elt_dma_offset; /** Offset into this buffer object, in bytes */
};

struct radeon_ioctl {
	GLuint vertex_offset;
	GLuint vertex_max;
	struct radeon_bo *bo;
	GLuint vertex_size;
};


struct radeon_prim {
	GLuint start;
	GLuint end;
	GLuint prim;
};

static inline GLuint radeonPackColor(GLuint cpp,
                                     GLubyte r, GLubyte g,
                                     GLubyte b, GLubyte a)
{
	switch (cpp) {
	case 2:
		return PACK_COLOR_565(r, g, b);
	case 4:
		return PACK_COLOR_8888(a, r, g, b);
	default:
		return 0;
	}
}


#define MAX_CMD_BUF_SZ (16*1024)

struct radeon_store {
	GLuint statenr;
	GLuint primnr;
	char cmd_buf[MAX_CMD_BUF_SZ];
	int cmd_used;
	int elts_start;
};

struct radeon_dri_mirror {
	__DRIcontext *context;	/* DRI context */
	__DRIscreen *screen;	/* DRI screen */

	drm_hw_lock_t *hwLock;
	int hwLockCount;
	int fd;
	int drmMinor;
};

typedef void (*radeon_tri_func) (radeonContextPtr,
				 radeonVertex *,
				 radeonVertex *, radeonVertex *);

typedef void (*radeon_line_func) (radeonContextPtr,
				  radeonVertex *, radeonVertex *);

typedef void (*radeon_point_func) (radeonContextPtr, radeonVertex *);

struct radeon_state {
	struct radeon_colorbuffer_state color;
	struct radeon_depthbuffer_state depth;
	struct radeon_scissor_state scissor;
};

/**
 * This structure holds the command buffer while it is being constructed.
 *
 * The first batch of commands in the buffer is always the state that needs
 * to be re-emitted when the context is lost. This batch can be skipped
 * otherwise.
 */
struct radeon_cmdbuf {
	struct radeon_cs_manager    *csm;
	struct radeon_cs            *cs;
	int size; /** # of dwords total */
	unsigned int flushing:1; /** whether we're currently in FlushCmdBufLocked */
};
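/*
 * Illustrative sketch (not part of the original header): command dwords are
 * accumulated in cmdbuf.cs and submitted to the kernel when the buffer fills
 * or a flush is forced. This assumes the BEGIN_BATCH()/OUT_BATCH()/END_BATCH()
 * helpers the driver layers on top of this struct (see radeon_common.h); the
 * two dwords below are placeholders, not a real CP packet.
 */
#if 0
static void example_emit_two_dwords(radeonContextPtr rmesa, uint32_t reg, uint32_t val)
{
	BATCH_LOCALS(rmesa);	/* the helpers operate on rmesa->cmdbuf.cs */

	BEGIN_BATCH(2);		/* reserve two dwords */
	OUT_BATCH(reg);		/* a real emit would write a packet header here */
	OUT_BATCH(val);
	END_BATCH();
}
#endif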

struct radeon_context {
   struct gl_context glCtx;             /**< base class, must be first */
   radeonScreenPtr radeonScreen;	/* Screen private DRI data */

   /* Texture object bookkeeping
    */
   int                   texture_depth;
   float                 initialMaxAnisotropy;
   uint32_t              texture_row_align;
   uint32_t              texture_rect_row_align;
   uint32_t              texture_compressed_row_align;

   struct radeon_dma dma;
   struct radeon_hw_state hw;
   /* Rasterization and vertex state:
    */
   GLuint TclFallback;
   GLuint Fallback;
   GLuint NewGLState;
   GLbitfield64 tnl_index_bitset;	/* index of bits for last tnl_install_attrs */

   unsigned int lastStamp;
   drm_radeon_sarea_t *sarea;	/* Private SAREA data */

   struct radeon_dri_mirror dri;

   GLuint do_usleeps;
   GLuint do_irqs;
   GLuint irqsEmitted;
   drm_radeon_irq_wait_t iw;

   struct radeon_state state;

   struct radeon_swtcl_info swtcl;
   struct radeon_tcl_info tcl;
   /* Configuration cache
    */
   driOptionCache optionCache;

   struct radeon_cmdbuf cmdbuf;

   GLboolean front_cliprects;

   /**
    * Set if rendering has occurred to the drawable's front buffer.
    *
    * This is used in the DRI2 case to detect that glFlush should also copy
    * the contents of the fake front buffer to the real front buffer.
    */
   GLboolean front_buffer_dirty;

   /**
    * Track whether front-buffer rendering is currently enabled
    *
    * A separate flag is used to track this in order to support MRT more
    * easily.
    */
   GLboolean is_front_buffer_rendering;

   /**
    * Track whether front-buffer is the current read target.
    *
    * This is closely associated with is_front_buffer_rendering, but may
    * be set separately.  The DRI2 fake front buffer must be referenced
    * either way.
    */
   GLboolean is_front_buffer_reading;

   struct {
	struct radeon_query_object *current;
	struct radeon_state_atom queryobj;
   } query;

   struct {
	   void (*get_lock)(radeonContextPtr radeon);
	   void (*update_viewport_offset)(struct gl_context *ctx);
	   void (*emit_cs_header)(struct radeon_cs *cs, radeonContextPtr rmesa);
	   void (*swtcl_flush)(struct gl_context *ctx, uint32_t offset);
	   void (*pre_emit_atoms)(radeonContextPtr rmesa);
	   void (*pre_emit_state)(radeonContextPtr rmesa);
	   void (*fallback)(struct gl_context *ctx, GLuint bit, GLboolean mode);
	   void (*free_context)(struct gl_context *ctx);
	   void (*emit_query_finish)(radeonContextPtr radeon);
	   void (*update_scissor)(struct gl_context *ctx);
	   unsigned (*check_blit)(gl_format mesa_format, uint32_t dst_pitch);
	   unsigned (*blit)(struct gl_context *ctx,
                        struct radeon_bo *src_bo,
                        intptr_t src_offset,
                        gl_format src_mesaformat,
                        unsigned src_pitch,
                        unsigned src_width,
                        unsigned src_height,
                        unsigned src_x_offset,
                        unsigned src_y_offset,
                        struct radeon_bo *dst_bo,
                        intptr_t dst_offset,
                        gl_format dst_mesaformat,
                        unsigned dst_pitch,
                        unsigned dst_width,
                        unsigned dst_height,
                        unsigned dst_x_offset,
                        unsigned dst_y_offset,
                        unsigned reg_width,
                        unsigned reg_height,
                        unsigned flip_y);
	   unsigned (*is_format_renderable)(gl_format mesa_format);
   } vtbl;
};

static inline radeonContextPtr RADEON_CONTEXT(struct gl_context *ctx)
{
	return (radeonContextPtr) ctx;
}

static inline __DRIdrawable* radeon_get_drawable(radeonContextPtr radeon)
{
	return radeon->dri.context->driDrawablePriv;
}

static inline __DRIdrawable* radeon_get_readable(radeonContextPtr radeon)
{
	return radeon->dri.context->driReadablePriv;
}
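/*
 * Illustrative sketch (not part of the original header): core Mesa hands the
 * driver a plain gl_context; RADEON_CONTEXT() recovers the driver context so
 * chip-specific behaviour can be reached through the vtbl hooks filled in by
 * the r100/r200 backends. The fallback bit is just an example value.
 */
#if 0
static void example_enter_texture_fallback(struct gl_context *ctx)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);

	rmesa->vtbl.fallback(ctx, RADEON_FALLBACK_TEXTURE, GL_TRUE);
}
#endif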

GLboolean radeonInitContext(radeonContextPtr radeon,
			    struct dd_function_table* functions,
			    const struct gl_config * glVisual,
			    __DRIcontext * driContextPriv,
			    void *sharedContextPrivate);

GLboolean radeonUnbindContext(__DRIcontext * driContextPriv);
void radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable,
				 GLboolean front_only);
GLboolean radeonMakeCurrent(__DRIcontext * driContextPriv,
			    __DRIdrawable * driDrawPriv,
			    __DRIdrawable * driReadPriv);
extern void radeonDestroyContext(__DRIcontext * driContextPriv);
void radeon_prepare_render(radeonContextPtr radeon);

#endif /* COMMON_CONTEXT_H */
 
533