Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Details | Last modification | View Log | RSS feed

Rev Author Line No. Line
1901 serge 1
/*
2
 * Copyright 2003 Tungsten Graphics, inc.
3
 * All Rights Reserved.
4
 *
5
 * Permission is hereby granted, free of charge, to any person obtaining a
6
 * copy of this software and associated documentation files (the "Software"),
7
 * to deal in the Software without restriction, including without limitation
8
 * on the rights to use, copy, modify, merge, publish, distribute, sub
9
 * license, and/or sell copies of the Software, and to permit persons to whom
10
 * the Software is furnished to do so, subject to the following conditions:
11
 *
12
 * The above copyright notice and this permission notice (including the next
13
 * paragraph) shall be included in all copies or substantial portions of the
14
 * Software.
15
 *
16
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
19
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23
 *
24
 * Authors:
25
 *    Keith Whitwell 
26
 */
27
 
28
#include "main/glheader.h"
29
#include "main/context.h"
30
#include "main/colormac.h"
31
 
32
#include "t_context.h"
33
#include "t_vertex.h"
34
 
35
#define DBG 0
36
 
37
/* Build and manage clipspace/ndc/window vertices.
38
 */
39
 
40
static GLboolean match_fastpath( struct tnl_clipspace *vtx,
41
				 const struct tnl_clipspace_fastpath *fp)
42
{
43
   GLuint j;
44
 
45
   if (vtx->attr_count != fp->attr_count)
46
      return GL_FALSE;
47
 
48
   for (j = 0; j < vtx->attr_count; j++)
49
      if (vtx->attr[j].format != fp->attr[j].format ||
50
	  vtx->attr[j].inputsize != fp->attr[j].size ||
51
	  vtx->attr[j].vertoffset != fp->attr[j].offset)
52
	 return GL_FALSE;
53
 
54
   if (fp->match_strides) {
55
      if (vtx->vertex_size != fp->vertex_size)
56
	 return GL_FALSE;
57
 
58
      for (j = 0; j < vtx->attr_count; j++)
59
	 if (vtx->attr[j].inputstride != fp->attr[j].stride)
60
	    return GL_FALSE;
61
   }
62
 
63
   return GL_TRUE;
64
}
65
 
66
static GLboolean search_fastpath_emit( struct tnl_clipspace *vtx )
67
{
68
   struct tnl_clipspace_fastpath *fp = vtx->fastpath;
69
 
70
   for ( ; fp ; fp = fp->next) {
71
      if (match_fastpath(vtx, fp)) {
72
         vtx->emit = fp->func;
73
	 return GL_TRUE;
74
      }
75
   }
76
 
77
   return GL_FALSE;
78
}
79
 
80
void _tnl_register_fastpath( struct tnl_clipspace *vtx,
81
			     GLboolean match_strides )
82
{
83
   struct tnl_clipspace_fastpath *fastpath = CALLOC_STRUCT(tnl_clipspace_fastpath);
84
   GLuint i;
85
 
86
   fastpath->vertex_size = vtx->vertex_size;
87
   fastpath->attr_count = vtx->attr_count;
88
   fastpath->match_strides = match_strides;
89
   fastpath->func = vtx->emit;
90
   fastpath->attr = (struct tnl_attr_type *)
91
      malloc(vtx->attr_count * sizeof(fastpath->attr[0]));
92
 
93
   for (i = 0; i < vtx->attr_count; i++) {
94
      fastpath->attr[i].format = vtx->attr[i].format;
95
      fastpath->attr[i].stride = vtx->attr[i].inputstride;
96
      fastpath->attr[i].size = vtx->attr[i].inputsize;
97
      fastpath->attr[i].offset = vtx->attr[i].vertoffset;
98
   }
99
 
100
   fastpath->next = vtx->fastpath;
101
   vtx->fastpath = fastpath;
102
}
103
 
104
 
105
 
106
/***********************************************************************
107
 * Build codegen functions or return generic ones:
108
 */
109
/* Lazily select an emit function for the current vertex layout, then
 * invoke it.  This function itself is installed as vtx->emit by
 * invalidate_funcs(); on first use it replaces itself with a real
 * emitter.  Selection order: (1) an already-registered fastpath
 * (which may deliberately be NULL for a known-bad layout),
 * (2) runtime codegen via vtx->codegen_emit, (3) a hardwired
 * emitter, (4) the generic fallback.
 */
static void choose_emit_func( struct gl_context *ctx, GLuint count, GLubyte *dest)
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint attr_count = vtx->attr_count;
   GLuint j;

   /* Capture each attribute's current input layout from the VB bindings,
    * and pre-select the per-size insert function.
    */
   for (j = 0; j < attr_count; j++) {
      GLvector4f *vptr = VB->AttribPtr[a[j].attrib];
      a[j].inputstride = vptr->stride;
      a[j].inputsize = vptr->size;
      a[j].emit = a[j].insert[vptr->size - 1]; /* not always used */
   }

   vtx->emit = NULL;

   /* Does this match an existing (hardwired, codegen or known-bad)
    * fastpath?
    */
   if (search_fastpath_emit(vtx)) {
      /* Use this result.  If it is null, then it is already known
       * that the current state will fail for codegen and there is no
       * point trying again.
       */
   }
   else if (vtx->codegen_emit) {
      vtx->codegen_emit(ctx);
   }

   /* Codegen may have failed (or was skipped); try a hardwired emitter. */
   if (!vtx->emit) {
      _tnl_generate_hardwired_emit(ctx);
   }

   /* Otherwise use the generic version:
    */
   if (!vtx->emit)
      vtx->emit = _tnl_generic_emit;

   vtx->emit( ctx, count, dest );
}
150
 
151
 
152
 
153
static void choose_interp_func( struct gl_context *ctx,
154
				GLfloat t,
155
				GLuint edst, GLuint eout, GLuint ein,
156
				GLboolean force_boundary )
157
{
158
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
159
 
160
   if (vtx->need_extras &&
161
       (ctx->_TriangleCaps & (DD_TRI_LIGHT_TWOSIDE|DD_TRI_UNFILLED))) {
162
      vtx->interp = _tnl_generic_interp_extras;
163
   } else {
164
      vtx->interp = _tnl_generic_interp;
165
   }
166
 
167
   vtx->interp( ctx, t, edst, eout, ein, force_boundary );
168
}
169
 
170
 
171
/* Deferred choice of provoking-vertex copy function; installed as
 * vtx->copy_pv by invalidate_funcs().  Mirrors choose_interp_func:
 * pick the "extras" variant when two-sided lighting or unfilled
 * triangles are active, then dispatch.
 */
static void choose_copy_pv_func(  struct gl_context *ctx, GLuint edst, GLuint esrc )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   const GLboolean want_extras =
      vtx->need_extras &&
      (ctx->_TriangleCaps & (DD_TRI_LIGHT_TWOSIDE|DD_TRI_UNFILLED)) != 0;

   vtx->copy_pv = want_extras ? _tnl_generic_copy_pv_extras
                              : _tnl_generic_copy_pv;

   vtx->copy_pv( ctx, edst, esrc );
}
184
 
185
 
186
/***********************************************************************
187
 * Public entrypoints, mostly dispatch to the above:
188
 */
189
 
190
 
191
/* Interpolate between two vertices to produce a third:
192
 */
193
void _tnl_interp( struct gl_context *ctx,
194
		  GLfloat t,
195
		  GLuint edst, GLuint eout, GLuint ein,
196
		  GLboolean force_boundary )
197
{
198
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
199
   vtx->interp( ctx, t, edst, eout, ein, force_boundary );
200
}
201
 
202
/* Copy colors from one vertex to another:
203
 */
204
void _tnl_copy_pv(  struct gl_context *ctx, GLuint edst, GLuint esrc )
205
{
206
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
207
   vtx->copy_pv( ctx, edst, esrc );
208
}
209
 
210
 
211
/* Extract a named attribute from a hardware vertex.  Will have to
212
 * reverse any viewport transformation, swizzling or other conversions
213
 * which may have been applied:
214
 */
215
void _tnl_get_attr( struct gl_context *ctx, const void *vin,
216
			      GLenum attr, GLfloat *dest )
217
{
218
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
219
   const struct tnl_clipspace_attr *a = vtx->attr;
220
   const GLuint attr_count = vtx->attr_count;
221
   GLuint j;
222
 
223
   for (j = 0; j < attr_count; j++) {
224
      if (a[j].attrib == attr) {
225
	 a[j].extract( &a[j], dest, (GLubyte *)vin + a[j].vertoffset );
226
	 return;
227
      }
228
   }
229
 
230
   /* Else return the value from ctx->Current.
231
    */
232
   if (attr == _TNL_ATTRIB_POINTSIZE) {
233
      /* If the hardware vertex doesn't have point size then use size from
234
       * struct gl_context.  XXX this will be wrong if drawing attenuated points!
235
       */
236
      dest[0] = ctx->Point.Size;
237
   }
238
   else {
239
      memcpy( dest, ctx->Current.Attrib[attr], 4*sizeof(GLfloat));
240
   }
241
}
242
 
243
 
244
/* Complementary operation to the above.
245
 */
246
void _tnl_set_attr( struct gl_context *ctx, void *vout,
247
		    GLenum attr, const GLfloat *src )
248
{
249
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
250
   const struct tnl_clipspace_attr *a = vtx->attr;
251
   const GLuint attr_count = vtx->attr_count;
252
   GLuint j;
253
 
254
   for (j = 0; j < attr_count; j++) {
255
      if (a[j].attrib == attr) {
256
	 a[j].insert[4-1]( &a[j], (GLubyte *)vout + a[j].vertoffset, src );
257
	 return;
258
      }
259
   }
260
}
261
 
262
 
263
/* Return a pointer to hardware vertex number 'nr' inside the
 * clipspace vertex buffer.
 */
void *_tnl_get_vertex( struct gl_context *ctx, GLuint nr )
{
   struct tnl_clipspace *clipspace = GET_VERTEX_STATE(ctx);
   GLubyte *base = clipspace->vertex_buf;

   return base + nr * clipspace->vertex_size;
}
269
 
270
/* React to raster-state changes that affect how vertices are
 * interpolated/copied (two-sided lighting, unfilled tris): re-install
 * the lazy chooser functions and mark all inputs dirty.
 */
void _tnl_invalidate_vertex_state( struct gl_context *ctx, GLuint new_state )
{
   const GLuint relevant = _DD_NEW_TRI_LIGHT_TWOSIDE|_DD_NEW_TRI_UNFILLED;

   if (new_state & relevant) {
      struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);

      vtx->new_inputs = ~0;
      vtx->interp = choose_interp_func;
      vtx->copy_pv = choose_copy_pv_func;
   }
}
279
 
280
static void invalidate_funcs( struct tnl_clipspace *vtx )
281
{
282
   vtx->emit = choose_emit_func;
283
   vtx->interp = choose_interp_func;
284
   vtx->copy_pv = choose_copy_pv_func;
285
   vtx->new_inputs = ~0;
286
}
287
 
288
/* Install a new hardware-vertex layout described by 'map' (nr entries).
 *
 * map            - per-attribute {attrib, format, offset} descriptors;
 *                  EMIT_PAD entries insert 'offset' bytes of padding.
 * nr             - number of map entries; must be < _TNL_ATTRIB_MAX and,
 *                  when non-zero, start with VERT_ATTRIB_POS.
 * vp             - viewport matrix to bake into emitted positions, or
 *                  NULL when no viewport transform is needed.
 * unpacked_size  - explicit vertex stride; when 0, offsets/stride are
 *                  packed tightly from the format sizes.
 *
 * Returns the resulting vertex size in bytes.  Any layout change
 * invalidates the cached emit/interp/copy functions.
 */
GLuint _tnl_install_attrs( struct gl_context *ctx, const struct tnl_attr_map *map,
			   GLuint nr, const GLfloat *vp,
			   GLuint unpacked_size )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint offset = 0;   /* running byte offset for packed layouts */
   GLuint i, j;         /* i indexes map[], j indexes vtx->attr[] */

   assert(nr < _TNL_ATTRIB_MAX);
   assert(nr == 0 || map[0].attrib == VERT_ATTRIB_POS);

   vtx->new_inputs = ~0;
   vtx->need_viewport = GL_FALSE;

   if (vp) {
      vtx->need_viewport = GL_TRUE;
   }

   for (j = 0, i = 0; i < nr; i++) {
      const GLuint format = map[i].format;
      if (format == EMIT_PAD) {
	 if (DBG)
	    printf("%d: pad %d, offset %d\n", i,
		   map[i].offset, offset);

	 /* Padding consumes bytes but no attr slot. */
	 offset += map[i].offset;

      }
      else {
	 GLuint tmpoffset;

	 /* Unpacked layouts take the caller-specified offset; packed
	  * layouts use the running offset.
	  */
	 if (unpacked_size)
	    tmpoffset = map[i].offset;
	 else
	    tmpoffset = offset;

	 /* Rewrite the slot (and drop cached funcs) only when it
	  * differs from what is already installed.
	  */
	 if (vtx->attr_count != j ||
	     vtx->attr[j].attrib != map[i].attrib ||
	     vtx->attr[j].format != format ||
	     vtx->attr[j].vertoffset != tmpoffset) {
	    invalidate_funcs(vtx);

	    vtx->attr[j].attrib = map[i].attrib;
	    vtx->attr[j].format = format;
	    vtx->attr[j].vp = vp;
	    vtx->attr[j].insert = _tnl_format_info[format].insert;
	    vtx->attr[j].extract = _tnl_format_info[format].extract;
	    vtx->attr[j].vertattrsize = _tnl_format_info[format].attrsize;
	    vtx->attr[j].vertoffset = tmpoffset;
	 }


	 if (DBG)
	    printf("%d: %s, vp %p, offset %d\n", i,
		   _tnl_format_info[format].name, (void *)vp,
		   vtx->attr[j].vertoffset);

	 offset += _tnl_format_info[format].attrsize;
	 j++;
      }
   }

   vtx->attr_count = j;

   if (unpacked_size)
      vtx->vertex_size = unpacked_size;
   else
      vtx->vertex_size = offset;

   assert(vtx->vertex_size <= vtx->max_vertex_size);
   return vtx->vertex_size;
}
360
 
361
 
362
 
363
/* Mark the given inputs as dirty so they are rebuilt on the next
 * _tnl_build_vertices() pass.
 */
void _tnl_invalidate_vertices( struct gl_context *ctx, GLuint newinputs )
{
   struct tnl_clipspace *clipspace = GET_VERTEX_STATE(ctx);

   clipspace->new_inputs |= newinputs;
}
368
 
369
 
370
/* This event has broader use beyond this file - will move elsewhere
 * and probably invoke a driver callback.
 */
void _tnl_notify_pipeline_output_change( struct gl_context *ctx )
{
   /* Pipeline outputs changed: drop all cached vertex functions. */
   invalidate_funcs( GET_VERTEX_STATE(ctx) );
}
378
 
379
 
380
/* Advance every attribute's input pointer by (diff - 1) vertices.
 * Used by the indexed-emit path between single-vertex emits; the
 * "- 1" presumably accounts for the advance already performed by the
 * preceding emit of one vertex — TODO confirm against the emit funcs.
 *
 * Fix: the loop header was truncated in this copy of the source
 * ("for (j=0; j"); reconstructed the obvious j < count bound.  Also
 * dropped the deprecated 'register' qualifier.
 */
static void adjust_input_ptrs( struct gl_context *ctx, GLint diff)
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint count = vtx->attr_count;
   GLuint j;

   diff -= 1;
   for (j = 0; j < count; j++) {
      GLvector4f *vptr = VB->AttribPtr[a->attrib];
      (a++)->inputptr += diff * vptr->stride;
   }
}
394
 
395
/* Point every attribute's inputptr at VB element 'start', and refresh
 * the viewport scale/translate vectors from the attr[0] viewport
 * matrix (a is never advanced below, so a->vp reads vtx->attr[0].vp —
 * presumably the position attribute carries the viewport transform;
 * verify against _tnl_install_attrs callers).
 */
static void update_input_ptrs( struct gl_context *ctx, GLuint start )
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint count = vtx->attr_count;
   GLuint j;

   for (j = 0; j < count; j++) {
      GLvector4f *vptr = VB->AttribPtr[a[j].attrib];

      /* Once a concrete emit func is installed, the layout it was
       * chosen for must still match the VB bindings.
       */
      if (vtx->emit != choose_emit_func) {
	 assert(a[j].inputstride == vptr->stride);
	 assert(a[j].inputsize == vptr->size);
      }

      a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride;
   }

   /* Cache viewport scale/translate for the emit functions. */
   if (a->vp) {
      vtx->vp_scale[0] = a->vp[MAT_SX];
      vtx->vp_scale[1] = a->vp[MAT_SY];
      vtx->vp_scale[2] = a->vp[MAT_SZ];
      vtx->vp_scale[3] = 1.0;
      vtx->vp_xlate[0] = a->vp[MAT_TX];
      vtx->vp_xlate[1] = a->vp[MAT_TY];
      vtx->vp_xlate[2] = a->vp[MAT_TZ];
      vtx->vp_xlate[3] = 0.0;
   }
}
425
 
426
 
427
void _tnl_build_vertices( struct gl_context *ctx,
428
			  GLuint start,
429
			  GLuint end,
430
			  GLuint newinputs )
431
{
432
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
433
   update_input_ptrs( ctx, start );
434
   vtx->emit( ctx, end - start,
435
	      (GLubyte *)(vtx->vertex_buf +
436
			  start * vtx->vertex_size));
437
}
438
 
439
/* Emit VB vertices start..end to dest.  Note that VB vertex at
440
 * postion start will be emitted to dest at position zero.
441
 */
442
void *_tnl_emit_vertices_to_buffer( struct gl_context *ctx,
443
				    GLuint start,
444
				    GLuint end,
445
				    void *dest )
446
{
447
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
448
 
449
   update_input_ptrs(ctx, start);
450
   /* Note: dest should not be adjusted for non-zero 'start' values:
451
    */
452
   vtx->emit( ctx, end - start, (GLubyte*) dest );
453
   return (void *)((GLubyte *)dest + vtx->vertex_size * (end - start));
454
}
455
 
456
/* Emit indexed VB vertices start..end to dest.  Note that VB vertex at
457
 * postion start will be emitted to dest at position zero.
458
 */
459
 
460
void *_tnl_emit_indexed_vertices_to_buffer( struct gl_context *ctx,
461
					    const GLuint *elts,
462
					    GLuint start,
463
					    GLuint end,
464
					    void *dest )
465
{
466
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
467
   GLuint oldIndex;
468
   GLubyte *cdest = dest;
469
 
470
   update_input_ptrs(ctx, oldIndex = elts[start++]);
471
   vtx->emit( ctx, 1, cdest );
472
   cdest += vtx->vertex_size;
473
 
474
   for (; start < end; ++start) {
475
      adjust_input_ptrs(ctx, elts[start] - oldIndex);
476
      oldIndex = elts[start];
477
      vtx->emit( ctx, 1, cdest);
478
      cdest += vtx->vertex_size;
479
   }
480
 
481
   return (void *) cdest;
482
}
483
 
484
 
485
void _tnl_init_vertices( struct gl_context *ctx,
486
			GLuint vb_size,
487
			GLuint max_vertex_size )
488
{
489
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
490
 
491
   _tnl_install_attrs( ctx, NULL, 0, NULL, 0 );
492
 
493
   vtx->need_extras = GL_TRUE;
494
   if (max_vertex_size > vtx->max_vertex_size) {
495
      _tnl_free_vertices( ctx );
496
      vtx->max_vertex_size = max_vertex_size;
497
      vtx->vertex_buf = (GLubyte *)_mesa_align_calloc(vb_size * max_vertex_size, 32 );
498
      invalidate_funcs(vtx);
499
   }
500
 
501
   switch(CHAN_TYPE) {
502
   case GL_UNSIGNED_BYTE:
503
      vtx->chan_scale[0] = 255.0;
504
      vtx->chan_scale[1] = 255.0;
505
      vtx->chan_scale[2] = 255.0;
506
      vtx->chan_scale[3] = 255.0;
507
      break;
508
   case GL_UNSIGNED_SHORT:
509
      vtx->chan_scale[0] = 65535.0;
510
      vtx->chan_scale[1] = 65535.0;
511
      vtx->chan_scale[2] = 65535.0;
512
      vtx->chan_scale[3] = 65535.0;
513
      break;
514
   default:
515
      vtx->chan_scale[0] = 1.0;
516
      vtx->chan_scale[1] = 1.0;
517
      vtx->chan_scale[2] = 1.0;
518
      vtx->chan_scale[3] = 1.0;
519
      break;
520
   }
521
 
522
   vtx->identity[0] = 0.0;
523
   vtx->identity[1] = 0.0;
524
   vtx->identity[2] = 0.0;
525
   vtx->identity[3] = 1.0;
526
 
527
   vtx->codegen_emit = NULL;
528
 
529
#ifdef USE_SSE_ASM
530
   if (!_mesa_getenv("MESA_NO_CODEGEN"))
531
      vtx->codegen_emit = _tnl_generate_sse_emit;
532
#endif
533
}
534
 
535
 
536
void _tnl_free_vertices( struct gl_context *ctx )
537
{
538
   TNLcontext *tnl = TNL_CONTEXT(ctx);
539
   if (tnl) {
540
      struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
541
      struct tnl_clipspace_fastpath *fp, *tmp;
542
 
543
      if (vtx->vertex_buf) {
544
         _mesa_align_free(vtx->vertex_buf);
545
         vtx->vertex_buf = NULL;
546
      }
547
 
548
      for (fp = vtx->fastpath ; fp ; fp = tmp) {
549
         tmp = fp->next;
550
         FREE(fp->attr);
551
 
552
         /* KW: At the moment, fp->func is constrained to be allocated by
553
          * _mesa_exec_alloc(), as the hardwired fastpaths in
554
          * t_vertex_generic.c are handled specially.  It would be nice
555
          * to unify them, but this probably won't change until this
556
          * module gets another overhaul.
557
          */
558
         _mesa_exec_free((void *) fp->func);
559
         FREE(fp);
560
      }
561
 
562
      vtx->fastpath = NULL;
563
   }
564
}