/*
 * Copyright (C) 2009 Maciej Cencora.
 * Copyright (C) 2008 Nicolai Haehnle.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "radeon_mipmap_tree.h"

#include <errno.h>
#include <unistd.h>

#include "util/simple_list.h"
#include "main/teximage.h"
#include "main/texobj.h"
#include "main/enums.h"
#include "radeon_texture.h"
#include "radeon_tile.h"

static unsigned get_aligned_compressed_row_stride(
		mesa_format format,
		unsigned width,
		unsigned minStride)
{
	const unsigned blockBytes = _mesa_get_format_bytes(format);
	unsigned blockWidth, blockHeight;
	unsigned stride;

	_mesa_get_format_block_size(format, &blockWidth, &blockHeight);

	/* Count the number of blocks required to store the given width,
	 * then multiply it by the bytes required to store a block.
	 */
	stride = (width + blockWidth - 1) / blockWidth * blockBytes;

	/* Round the given minimum stride to the next full blocksize.
	 * (minStride + blockBytes - 1) / blockBytes * blockBytes
	 */
	if ( stride < minStride )
		stride = (minStride + blockBytes - 1) / blockBytes * blockBytes;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s width %u, minStride %u, block(bytes %u, width %u):"
			"stride %u\n",
			__func__, width, minStride,
			blockBytes, blockWidth,
			stride);

	return stride;
}

unsigned get_texture_image_size(
		mesa_format format,
		unsigned rowStride,
		unsigned height,
		unsigned depth,
		unsigned tiling)
{
	if (_mesa_is_format_compressed(format)) {
		unsigned blockWidth, blockHeight;

		_mesa_get_format_block_size(format, &blockWidth, &blockHeight);

		return rowStride * ((height + blockHeight - 1) / blockHeight) * depth;
	} else if (tiling) {
		/* Need to align height to tile height */
		unsigned tileWidth, tileHeight;

		get_tile_size(format, &tileWidth, &tileHeight);
		tileHeight--;
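		/* The decrement turns tileHeight into a mask; the round-up below
		 * relies on the tile height being a power of two. */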

		height = (height + tileHeight) & ~tileHeight;
	}

	return rowStride * height * depth;
}

unsigned get_texture_image_row_stride(radeonContextPtr rmesa, mesa_format format, unsigned width, unsigned tiling, GLuint target)
{
	if (_mesa_is_format_compressed(format)) {
		return get_aligned_compressed_row_stride(format, width, rmesa->texture_compressed_row_align);
	} else {
		unsigned row_align;

		if (!_mesa_is_pow_two(width) || target == GL_TEXTURE_RECTANGLE) {
			row_align = rmesa->texture_rect_row_align - 1;
		} else if (tiling) {
			unsigned tileWidth, tileHeight;
			get_tile_size(format, &tileWidth, &tileHeight);
			row_align = tileWidth * _mesa_get_format_bytes(format) - 1;
		} else {
			row_align = rmesa->texture_row_align - 1;
		}
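		/* row_align holds the required alignment minus one, so the return
		 * below can use it as a round-up mask. */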

		return (_mesa_format_row_stride(format, width) + row_align) & ~row_align;
	}
}

/**
 * Compute sizes and fill in offset and blit information for the given
 * image (determined by \p face and \p level).
 *
 * \param curOffset points to the offset at which the image is to be stored
 * and is updated by this function according to the size of the image.
 */
static void compute_tex_image_offset(radeonContextPtr rmesa, radeon_mipmap_tree *mt,
	GLuint face, GLuint level, GLuint* curOffset)
{
	radeon_mipmap_level *lvl = &mt->levels[level];
	GLuint height;

	height = _mesa_next_pow_two_32(lvl->height);
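	/* The level is sized with its height rounded up to the next power of two. */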

	lvl->rowstride = get_texture_image_row_stride(rmesa, mt->mesaFormat, lvl->width, mt->tilebits, mt->target);
	lvl->size = get_texture_image_size(mt->mesaFormat, lvl->rowstride, height, lvl->depth, mt->tilebits);

	assert(lvl->size > 0);

	lvl->faces[face].offset = *curOffset;
	*curOffset += lvl->size;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p) level %d, face %d: rs:%d %dx%d at %d\n",
			__func__, rmesa,
			level, face,
			lvl->rowstride, lvl->width, height, lvl->faces[face].offset);
}

static void calculate_miptree_layout(radeonContextPtr rmesa, radeon_mipmap_tree *mt)
{
	GLuint curOffset, i, face, level;

	assert(mt->numLevels <= rmesa->glCtx.Const.MaxTextureLevels);

	curOffset = 0;
	for(face = 0; face < mt->faces; face++) {

		for(i = 0, level = mt->baseLevel; i < mt->numLevels; i++, level++) {
			mt->levels[level].valid = 1;
			mt->levels[level].width = minify(mt->width0, i);
			mt->levels[level].height = minify(mt->height0, i);
			mt->levels[level].depth = minify(mt->depth0, i);
			compute_tex_image_offset(rmesa, mt, face, level, &curOffset);
		}
	}

	/* Note the required size in memory, rounded up to the offset
	 * alignment (RADEON_OFFSET_MASK is the alignment minus one). */
	mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p, %p) total size %d\n",
			__func__, rmesa, mt, mt->totalsize);
}

/**
 * Create a new mipmap tree, calculate its layout and allocate memory.
 */
radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa,
					  GLenum target, mesa_format mesaFormat, GLuint baseLevel, GLuint numLevels,
					  GLuint width0, GLuint height0, GLuint depth0, GLuint tilebits)
{
	radeon_mipmap_tree *mt = CALLOC_STRUCT(_radeon_mipmap_tree);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
		"%s(%p) new tree is %p.\n",
		__func__, rmesa, mt);

	mt->mesaFormat = mesaFormat;
	mt->refcount = 1;
	mt->target = target;
	mt->faces = _mesa_num_tex_faces(target);
	mt->baseLevel = baseLevel;
	mt->numLevels = numLevels;
	mt->width0 = width0;
	mt->height0 = height0;
	mt->depth0 = depth0;
	mt->tilebits = tilebits;

	calculate_miptree_layout(rmesa, mt);

	mt->bo = radeon_bo_open(rmesa->radeonScreen->bom,
				0, mt->totalsize, 1024,
				RADEON_GEM_DOMAIN_VRAM,
				0);
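	/* The whole tree is backed by a single buffer object in VRAM,
	 * sized to the computed layout and aligned to 1024 bytes. */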

	return mt;
}

void radeon_miptree_reference(radeon_mipmap_tree *mt, radeon_mipmap_tree **ptr)
{
	assert(!*ptr);

	mt->refcount++;
	assert(mt->refcount > 0);

	*ptr = mt;
}

void radeon_miptree_unreference(radeon_mipmap_tree **ptr)
{
	radeon_mipmap_tree *mt = *ptr;
	if (!mt)
		return;

	assert(mt->refcount > 0);

	mt->refcount--;
	if (!mt->refcount) {
		radeon_bo_unref(mt->bo);
		free(mt);
	}

	*ptr = 0;
}

/**
 * Calculate min and max LOD for the given texture object.
 * @param[in] samp sampler object supplying the filter and LOD settings
 * @param[in] tObj texture object whose LOD values to calculate
 * @param[out] pminLod minimal LOD
 * @param[out] pmaxLod maximal LOD
 */
static void calculate_min_max_lod(struct gl_sampler_object *samp, struct gl_texture_object *tObj,
				       unsigned *pminLod, unsigned *pmaxLod)
{
	int minLod, maxLod;
	/* Yes, this looks overly complicated, but it's all needed.
	*/
	switch (tObj->Target) {
	case GL_TEXTURE_1D:
	case GL_TEXTURE_2D:
	case GL_TEXTURE_3D:
	case GL_TEXTURE_CUBE_MAP:
		if (samp->MinFilter == GL_NEAREST || samp->MinFilter == GL_LINEAR) {
			/* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
			*/
			minLod = maxLod = tObj->BaseLevel;
		} else {
			minLod = tObj->BaseLevel + (GLint)(samp->MinLod);
			minLod = MAX2(minLod, tObj->BaseLevel);
			minLod = MIN2(minLod, tObj->MaxLevel);
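			/* MaxLod is rounded to the nearest level and clamped to the
			 * levels the texture can actually supply. */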
			maxLod = tObj->BaseLevel + (GLint)(samp->MaxLod + 0.5);
			maxLod = MIN2(maxLod, tObj->MaxLevel);
			maxLod = MIN2(maxLod, tObj->Image[0][minLod]->MaxNumLevels - 1 + minLod);
			maxLod = MAX2(maxLod, minLod); /* need at least one level */
		}
		break;
	case GL_TEXTURE_RECTANGLE_NV:
	case GL_TEXTURE_4D_SGIS:
		minLod = maxLod = 0;
		break;
	default:
		return;
	}

	radeon_print(RADEON_TEXTURE, RADEON_TRACE,
			"%s(%p) target %s, min %d, max %d.\n",
			__func__, tObj,
			_mesa_lookup_enum_by_nr(tObj->Target),
			minLod, maxLod);

	/* save these values */
	*pminLod = minLod;
	*pmaxLod = maxLod;
}

/**
 * Checks whether the given miptree can hold the given texture image at the
 * given face and level.
 */
GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt,
				       struct gl_texture_image *texImage)
{
	radeon_mipmap_level *lvl;
	GLuint level = texImage->Level;
	if (texImage->TexFormat != mt->mesaFormat)
		return GL_FALSE;

	lvl = &mt->levels[level];
	if (!lvl->valid ||
	    lvl->width != texImage->Width ||
	    lvl->height != texImage->Height ||
	    lvl->depth != texImage->Depth)
		return GL_FALSE;

	return GL_TRUE;
}

/**
 * Checks whether the given miptree has the right format to store the given texture object.
 */
static GLboolean radeon_miptree_matches_texture(radeon_mipmap_tree *mt, struct gl_texture_object *texObj)
{
	struct gl_texture_image *firstImage;
	unsigned numLevels;
	radeon_mipmap_level *mtBaseLevel;

	if (texObj->BaseLevel < mt->baseLevel)
		return GL_FALSE;

	mtBaseLevel = &mt->levels[texObj->BaseLevel - mt->baseLevel];
	firstImage = texObj->Image[0][texObj->BaseLevel];
	numLevels = MIN2(texObj->_MaxLevel - texObj->BaseLevel + 1, firstImage->MaxNumLevels);

	if (radeon_is_debug_enabled(RADEON_TEXTURE, RADEON_TRACE)) {
		fprintf(stderr, "Checking if miptree %p matches texObj %p\n", mt, texObj);
		fprintf(stderr, "target %d vs %d\n", mt->target, texObj->Target);
		fprintf(stderr, "format %d vs %d\n", mt->mesaFormat, firstImage->TexFormat);
		fprintf(stderr, "numLevels %d vs %d\n", mt->numLevels, numLevels);
		fprintf(stderr, "width0 %d vs %d\n", mtBaseLevel->width, firstImage->Width);
		fprintf(stderr, "height0 %d vs %d\n", mtBaseLevel->height, firstImage->Height);
		fprintf(stderr, "depth0 %d vs %d\n", mtBaseLevel->depth, firstImage->Depth);
		if (mt->target == texObj->Target &&
		    mt->mesaFormat == firstImage->TexFormat &&
		    mt->numLevels >= numLevels &&
		    mtBaseLevel->width == firstImage->Width &&
		    mtBaseLevel->height == firstImage->Height &&
		    mtBaseLevel->depth == firstImage->Depth) {
			fprintf(stderr, "MATCHED\n");
		} else {
			fprintf(stderr, "NOT MATCHED\n");
		}
	}

	return (mt->target == texObj->Target &&
	        mt->mesaFormat == firstImage->TexFormat &&
	        mt->numLevels >= numLevels &&
	        mtBaseLevel->width == firstImage->Width &&
	        mtBaseLevel->height == firstImage->Height &&
	        mtBaseLevel->depth == firstImage->Depth);
}

/**
 * Try to allocate a mipmap tree for the given texture object.
 * @param[in] rmesa radeon context
 * @param[in] t radeon texture object
 */
void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t)
{
	struct gl_texture_object *texObj = &t->base;
	struct gl_texture_image *texImg = texObj->Image[0][texObj->BaseLevel];
	GLuint numLevels;
	assert(!t->mt);

	if (!texImg) {
		radeon_warning("%s(%p) No image in given texture object(%p).\n",
				__func__, rmesa, t);
		return;
	}

	numLevels = MIN2(texObj->MaxLevel - texObj->BaseLevel + 1, texImg->MaxNumLevels);

	t->mt = radeon_miptree_create(rmesa, t->base.Target,
		texImg->TexFormat, texObj->BaseLevel,
		numLevels, texImg->Width, texImg->Height,
		texImg->Depth, t->tile_bits);
}

GLuint
radeon_miptree_image_offset(radeon_mipmap_tree *mt,
			    GLuint face, GLuint level)
{
	if (mt->target == GL_TEXTURE_CUBE_MAP_ARB)
		return (mt->levels[level].faces[face].offset);
	else
		return mt->levels[level].faces[0].offset;
}

/**
 * Ensure that the given image is stored in the given miptree from now on.
 */
static void migrate_image_to_miptree(radeon_mipmap_tree *mt,
				     radeon_texture_image *image,
				     int face, int level)
{
	radeon_mipmap_level *dstlvl = &mt->levels[level];
	unsigned char *dest;

	assert(image->mt != mt);
	assert(dstlvl->valid);
	assert(dstlvl->width == image->base.Base.Width);
	assert(dstlvl->height == image->base.Base.Height);
	assert(dstlvl->depth == image->base.Base.Depth);

	radeon_print(RADEON_TEXTURE, RADEON_VERBOSE,
			"%s miptree %p, image %p, face %d, level %d.\n",
			__func__, mt, image, face, level);

	radeon_bo_map(mt->bo, GL_TRUE);
	dest = mt->bo->ptr + dstlvl->faces[face].offset;

	if (image->mt) {
		/* Format etc. should match, so we really just need a memcpy().
		 * In fact, that memcpy() could be done by the hardware in many
		 * cases, provided that we have a proper memory manager.
		 */
		assert(mt->mesaFormat == image->base.Base.TexFormat);

		radeon_mipmap_level *srclvl = &image->mt->levels[image->base.Base.Level];

		assert(image->base.Base.Level == level);
		assert(srclvl->size == dstlvl->size);
		assert(srclvl->rowstride == dstlvl->rowstride);

		radeon_bo_map(image->mt->bo, GL_FALSE);

		memcpy(dest,
			image->mt->bo->ptr + srclvl->faces[face].offset,
			dstlvl->size);
		radeon_bo_unmap(image->mt->bo);

		radeon_miptree_unreference(&image->mt);
	}

	radeon_bo_unmap(mt->bo);

	radeon_miptree_reference(mt, &image->mt);
}

/**
 * Filter matching miptrees, and select the one with the most data.
 * @param[in] texObj radeon texture object
 * @param[in] firstLevel first texture level to check
 * @param[in] lastLevel last texture level to check
 */
static radeon_mipmap_tree * get_biggest_matching_miptree(radeonTexObj *texObj,
							  unsigned firstLevel,
							  unsigned lastLevel)
{
	const unsigned numLevels = lastLevel - firstLevel + 1;
	unsigned *mtSizes = calloc(numLevels, sizeof(unsigned));
	radeon_mipmap_tree **mts = calloc(numLevels, sizeof(radeon_mipmap_tree *));
	unsigned mtCount = 0;
	unsigned maxMtIndex = 0;
	radeon_mipmap_tree *tmp;
	unsigned int level;
	int i;

	for (level = firstLevel; level <= lastLevel; ++level) {
		radeon_texture_image *img = get_radeon_texture_image(texObj->base.Image[0][level]);
		unsigned found = 0;
		// TODO: why this hack??
		if (!img)
			break;

		if (!img->mt)
			continue;
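		/* If this image's miptree is already a candidate, the loop below
		 * credits this level's size to it. */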

		for (i = 0; i < mtCount; ++i) {
			if (mts[i] == img->mt) {
				found = 1;
				mtSizes[i] += img->mt->levels[img->base.Base.Level].size;
				break;
			}
		}
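		/* Otherwise register the miptree as a new candidate, provided it
		 * is compatible with the texture object. */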

		if (!found && radeon_miptree_matches_texture(img->mt, &texObj->base)) {
			mtSizes[mtCount] = img->mt->levels[img->base.Base.Level].size;
			mts[mtCount] = img->mt;
			mtCount++;
		}
	}

	if (mtCount == 0) {
		free(mtSizes);
		free(mts);
		return NULL;
	}
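	/* Pick the candidate miptree that holds the largest amount of data. */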

	for (i = 1; i < mtCount; ++i) {
		if (mtSizes[i] > mtSizes[maxMtIndex]) {
			maxMtIndex = i;
		}
	}

	tmp = mts[maxMtIndex];
	free(mtSizes);
	free(mts);

	return tmp;
}

/**
 * Validate texture mipmap tree.
 * If individual images are stored in different mipmap trees,
 * use the mipmap tree that holds the most of the correct data.
 */
int radeon_validate_texture_miptree(struct gl_context * ctx,
				    struct gl_sampler_object *samp,
				    struct gl_texture_object *texObj)
{
	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
	radeonTexObj *t = radeon_tex_obj(texObj);
	radeon_mipmap_tree *dst_miptree;

	if (samp == &texObj->Sampler && (t->validated || t->image_override)) {
		return GL_TRUE;
	}

	calculate_min_max_lod(samp, &t->base, &t->minLod, &t->maxLod);

	radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: Validating texture %p now, minLod = %d, maxLod = %d\n",
			__func__, texObj, t->minLod, t->maxLod);

	dst_miptree = get_biggest_matching_miptree(t, t->base.BaseLevel, t->base._MaxLevel);
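	/* Drop the old reference: either reuse the best existing miptree
	 * or allocate a fresh one below. */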

	radeon_miptree_unreference(&t->mt);
	if (!dst_miptree) {
		radeon_try_alloc_miptree(rmesa, t);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: No matching miptree found, allocated new one %p\n",
			__func__, t->mt);

	} else {
		radeon_miptree_reference(dst_miptree, &t->mt);
		radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
			"%s: Using miptree %p\n", __func__, t->mt);
	}

	const unsigned faces = _mesa_num_tex_faces(texObj->Target);
	unsigned face, level;
	radeon_texture_image *img;
	/* Validate only the levels that will actually be used during rendering */
	for (face = 0; face < faces; ++face) {
		for (level = t->minLod; level <= t->maxLod; ++level) {
			img = get_radeon_texture_image(texObj->Image[face][level]);

			radeon_print(RADEON_TEXTURE, RADEON_TRACE,
				"Checking image level %d, face %d, mt %p ... ",
				level, face, img->mt);

			if (img->mt != t->mt && !img->used_as_render_target) {
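				/* The image lives elsewhere (another miptree or its own bo):
				 * flush the command stream if it still references the source,
				 * then copy the data into the destination miptree. */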
				radeon_print(RADEON_TEXTURE, RADEON_TRACE,
					"MIGRATING\n");

				struct radeon_bo *src_bo = (img->mt) ? img->mt->bo : img->bo;
				if (src_bo && radeon_bo_is_referenced_by_cs(src_bo, rmesa->cmdbuf.cs)) {
					radeon_firevertices(rmesa);
				}
				migrate_image_to_miptree(t->mt, img, face, level);
			} else
				radeon_print(RADEON_TEXTURE, RADEON_TRACE, "OK\n");
		}
	}

	t->validated = GL_TRUE;

	return GL_TRUE;
}

uint32_t get_base_teximage_offset(radeonTexObj *texObj)
{
	if (!texObj->mt) {
		return 0;
	} else {
		return radeon_miptree_image_offset(texObj->mt, 0, texObj->minLod);
	}
}