Details | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
5563 | serge | 1 | /************************************************************************** |
2 | * |
||
3 | * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas. |
||
4 | * All Rights Reserved. |
||
5 | * |
||
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
||
7 | * copy of this software and associated documentation files (the |
||
8 | * "Software"), to deal in the Software without restriction, including |
||
9 | * without limitation the rights to use, copy, modify, merge, publish, |
||
10 | * distribute, sub license, and/or sell copies of the Software, and to |
||
11 | * permit persons to whom the Software is furnished to do so, subject to |
||
12 | * the following conditions: |
||
13 | * |
||
14 | * The above copyright notice and this permission notice (including the |
||
15 | * next paragraph) shall be included in all copies or substantial portions |
||
16 | * of the Software. |
||
17 | * |
||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
||
19 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
||
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
||
21 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
||
22 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
||
23 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
||
24 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
||
25 | * |
||
26 | **************************************************************************/ |
||
27 | |||
#include <GL/gl.h>
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/teximage.h"
44 | |||
45 | #define FILE_DEBUG_FLAG DEBUG_MIPTREE |
||
46 | |||
47 | static GLenum |
||
48 | target_to_target(GLenum target) |
||
49 | { |
||
50 | switch (target) { |
||
51 | case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB: |
||
52 | case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB: |
||
53 | case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB: |
||
54 | case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB: |
||
55 | case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB: |
||
56 | case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB: |
||
57 | return GL_TEXTURE_CUBE_MAP_ARB; |
||
58 | default: |
||
59 | return target; |
||
60 | } |
||
61 | } |
||
62 | |||
63 | /** |
||
64 | * @param for_bo Indicates that the caller is |
||
65 | * intel_miptree_create_for_bo(). If true, then do not create |
||
66 | * \c stencil_mt. |
||
67 | */ |
||
68 | struct intel_mipmap_tree * |
||
69 | intel_miptree_create_layout(struct intel_context *intel, |
||
70 | GLenum target, |
||
71 | gl_format format, |
||
72 | GLuint first_level, |
||
73 | GLuint last_level, |
||
74 | GLuint width0, |
||
75 | GLuint height0, |
||
76 | GLuint depth0, |
||
77 | bool for_bo) |
||
78 | { |
||
79 | struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1); |
||
80 | if (!mt) |
||
81 | return NULL; |
||
82 | |||
83 | DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__, |
||
84 | _mesa_lookup_enum_by_nr(target), |
||
85 | _mesa_get_format_name(format), |
||
86 | first_level, last_level, mt); |
||
87 | |||
88 | mt->target = target_to_target(target); |
||
89 | mt->format = format; |
||
90 | mt->first_level = first_level; |
||
91 | mt->last_level = last_level; |
||
92 | mt->logical_width0 = width0; |
||
93 | mt->logical_height0 = height0; |
||
94 | mt->logical_depth0 = depth0; |
||
95 | |||
96 | /* The cpp is bytes per (1, blockheight)-sized block for compressed |
||
97 | * textures. This is why you'll see divides by blockheight all over |
||
98 | */ |
||
99 | unsigned bw, bh; |
||
100 | _mesa_get_format_block_size(format, &bw, &bh); |
||
101 | assert(_mesa_get_format_bytes(mt->format) % bw == 0); |
||
102 | mt->cpp = _mesa_get_format_bytes(mt->format) / bw; |
||
103 | |||
104 | mt->compressed = _mesa_is_format_compressed(format); |
||
105 | mt->refcount = 1; |
||
106 | |||
107 | if (target == GL_TEXTURE_CUBE_MAP) { |
||
108 | assert(depth0 == 1); |
||
109 | depth0 = 6; |
||
110 | } |
||
111 | |||
112 | mt->physical_width0 = width0; |
||
113 | mt->physical_height0 = height0; |
||
114 | mt->physical_depth0 = depth0; |
||
115 | |||
116 | intel_get_texture_alignment_unit(intel, mt->format, |
||
117 | &mt->align_w, &mt->align_h); |
||
118 | |||
119 | (void) intel; |
||
120 | if (intel->is_945) |
||
121 | i945_miptree_layout(mt); |
||
122 | else |
||
123 | i915_miptree_layout(mt); |
||
124 | |||
125 | return mt; |
||
126 | } |
||
127 | |||
128 | /** |
||
129 | * \brief Helper function for intel_miptree_create(). |
||
130 | */ |
||
131 | static uint32_t |
||
132 | intel_miptree_choose_tiling(struct intel_context *intel, |
||
133 | gl_format format, |
||
134 | uint32_t width0, |
||
135 | enum intel_miptree_tiling_mode requested, |
||
136 | struct intel_mipmap_tree *mt) |
||
137 | { |
||
138 | /* Some usages may want only one type of tiling, like depth miptrees (Y |
||
139 | * tiled), or temporary BOs for uploading data once (linear). |
||
140 | */ |
||
141 | switch (requested) { |
||
142 | case INTEL_MIPTREE_TILING_ANY: |
||
143 | break; |
||
144 | case INTEL_MIPTREE_TILING_Y: |
||
145 | return I915_TILING_Y; |
||
146 | case INTEL_MIPTREE_TILING_NONE: |
||
147 | return I915_TILING_NONE; |
||
148 | } |
||
149 | |||
150 | int minimum_pitch = mt->total_width * mt->cpp; |
||
151 | |||
152 | /* If the width is much smaller than a tile, don't bother tiling. */ |
||
153 | if (minimum_pitch < 64) |
||
154 | return I915_TILING_NONE; |
||
155 | |||
156 | if (ALIGN(minimum_pitch, 512) >= 32768) { |
||
157 | perf_debug("%dx%d miptree too large to blit, falling back to untiled", |
||
158 | mt->total_width, mt->total_height); |
||
159 | return I915_TILING_NONE; |
||
160 | } |
||
161 | |||
162 | /* We don't have BLORP to handle Y-tiled blits, so use X-tiling. */ |
||
163 | return I915_TILING_X; |
||
164 | } |
||
165 | |||
/**
 * Create a miptree and allocate the backing region for it.
 *
 * Returns NULL on allocation failure, or when the computed layout is the
 * degenerate "null texture" (zero pitch or height).
 */
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     gl_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     enum intel_miptree_tiling_mode requested_tiling)
{
   struct intel_mipmap_tree *mt;
   GLuint total_width, total_height;

   mt = intel_miptree_create_layout(intel, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0,
                                    false);
   /*
    * pitch == 0 || height == 0 indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
                                                 requested_tiling,
                                                 mt);
   /* NOTE(review): intel_miptree_choose_tiling() in this file only ever
    * returns I915_TILING_Y, I915_TILING_X or I915_TILING_NONE, never the
    * combined (I915_TILING_Y | I915_TILING_X) value, so y_or_x appears to
    * always be false and the X-tiled fallback below unreachable — confirm
    * (this mirrors the i965 code, where the combined value is returned).
    */
   bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);

   mt->region = intel_region_alloc(intel->intelScreen,
                                   y_or_x ? I915_TILING_Y : tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   /* If the region is too large to fit in the aperture, we need to use the
    * BLT engine to support it. The BLT paths can't currently handle Y-tiling,
    * so we need to fall back to X.
    */
   if (y_or_x && mt->region->bo->size >= intel->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);
      intel_region_release(&mt->region);

      mt->region = intel_region_alloc(intel->intelScreen,
                                      I915_TILING_X,
                                      mt->cpp,
                                      total_width,
                                      total_height,
                                      expect_accelerated_upload);
   }

   mt->offset = 0;

   /* Region allocation failed: tear down the layout too. */
   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}
||
235 | |||
236 | struct intel_mipmap_tree * |
||
237 | intel_miptree_create_for_bo(struct intel_context *intel, |
||
238 | drm_intel_bo *bo, |
||
239 | gl_format format, |
||
240 | uint32_t offset, |
||
241 | uint32_t width, |
||
242 | uint32_t height, |
||
243 | int pitch, |
||
244 | uint32_t tiling) |
||
245 | { |
||
246 | struct intel_mipmap_tree *mt; |
||
247 | |||
248 | struct intel_region *region = calloc(1, sizeof(*region)); |
||
249 | if (!region) |
||
250 | return NULL; |
||
251 | |||
252 | /* Nothing will be able to use this miptree with the BO if the offset isn't |
||
253 | * aligned. |
||
254 | */ |
||
255 | if (tiling != I915_TILING_NONE) |
||
256 | assert(offset % 4096 == 0); |
||
257 | |||
258 | /* miptrees can't handle negative pitch. If you need flipping of images, |
||
259 | * that's outside of the scope of the mt. |
||
260 | */ |
||
261 | assert(pitch >= 0); |
||
262 | |||
263 | mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format, |
||
264 | 0, 0, |
||
265 | width, height, 1, |
||
266 | true); |
||
267 | if (!mt) |
||
268 | return mt; |
||
269 | |||
270 | region->cpp = mt->cpp; |
||
271 | region->width = width; |
||
272 | region->height = height; |
||
273 | region->pitch = pitch; |
||
274 | region->refcount = 1; |
||
275 | drm_intel_bo_reference(bo); |
||
276 | region->bo = bo; |
||
277 | region->tiling = tiling; |
||
278 | |||
279 | mt->region = region; |
||
280 | mt->offset = offset; |
||
281 | |||
282 | return mt; |
||
283 | } |
||
284 | |||
285 | |||
286 | /** |
||
287 | * For a singlesample DRI2 buffer, this simply wraps the given region with a miptree. |
||
288 | * |
||
289 | * For a multisample DRI2 buffer, this wraps the given region with |
||
290 | * a singlesample miptree, then creates a multisample miptree into which the |
||
291 | * singlesample miptree is embedded as a child. |
||
292 | */ |
||
293 | struct intel_mipmap_tree* |
||
294 | intel_miptree_create_for_dri2_buffer(struct intel_context *intel, |
||
295 | unsigned dri_attachment, |
||
296 | gl_format format, |
||
297 | struct intel_region *region) |
||
298 | { |
||
299 | struct intel_mipmap_tree *mt = NULL; |
||
300 | |||
301 | /* Only the front and back buffers, which are color buffers, are shared |
||
302 | * through DRI2. |
||
303 | */ |
||
304 | assert(dri_attachment == __DRI_BUFFER_BACK_LEFT || |
||
305 | dri_attachment == __DRI_BUFFER_FRONT_LEFT || |
||
306 | dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT); |
||
307 | assert(_mesa_get_format_base_format(format) == GL_RGB || |
||
308 | _mesa_get_format_base_format(format) == GL_RGBA); |
||
309 | |||
310 | mt = intel_miptree_create_for_bo(intel, |
||
311 | region->bo, |
||
312 | format, |
||
313 | 0, |
||
314 | region->width, |
||
315 | region->height, |
||
316 | region->pitch, |
||
317 | region->tiling); |
||
318 | if (!mt) |
||
319 | return NULL; |
||
320 | mt->region->name = region->name; |
||
321 | |||
322 | return mt; |
||
323 | } |
||
324 | |||
325 | struct intel_mipmap_tree* |
||
326 | intel_miptree_create_for_renderbuffer(struct intel_context *intel, |
||
327 | gl_format format, |
||
328 | uint32_t width, |
||
329 | uint32_t height) |
||
330 | { |
||
331 | uint32_t depth = 1; |
||
332 | |||
333 | return intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0, |
||
334 | width, height, depth, true, |
||
335 | INTEL_MIPTREE_TILING_ANY); |
||
336 | } |
||
337 | |||
338 | void |
||
339 | intel_miptree_reference(struct intel_mipmap_tree **dst, |
||
340 | struct intel_mipmap_tree *src) |
||
341 | { |
||
342 | if (*dst == src) |
||
343 | return; |
||
344 | |||
345 | intel_miptree_release(dst); |
||
346 | |||
347 | if (src) { |
||
348 | src->refcount++; |
||
349 | DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount); |
||
350 | } |
||
351 | |||
352 | *dst = src; |
||
353 | } |
||
354 | |||
355 | |||
356 | void |
||
357 | intel_miptree_release(struct intel_mipmap_tree **mt) |
||
358 | { |
||
359 | if (!*mt) |
||
360 | return; |
||
361 | |||
362 | DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1); |
||
363 | if (--(*mt)->refcount <= 0) { |
||
364 | GLuint i; |
||
365 | |||
366 | DBG("%s deleting %p\n", __FUNCTION__, *mt); |
||
367 | |||
368 | intel_region_release(&((*mt)->region)); |
||
369 | |||
370 | for (i = 0; i < MAX_TEXTURE_LEVELS; i++) { |
||
371 | free((*mt)->level[i].slice); |
||
372 | } |
||
373 | |||
374 | free(*mt); |
||
375 | } |
||
376 | *mt = NULL; |
||
377 | } |
||
378 | |||
379 | void |
||
380 | intel_miptree_get_dimensions_for_image(struct gl_texture_image *image, |
||
381 | int *width, int *height, int *depth) |
||
382 | { |
||
383 | switch (image->TexObject->Target) { |
||
384 | case GL_TEXTURE_1D_ARRAY: |
||
385 | *width = image->Width; |
||
386 | *height = 1; |
||
387 | *depth = image->Height; |
||
388 | break; |
||
389 | default: |
||
390 | *width = image->Width; |
||
391 | *height = image->Height; |
||
392 | *depth = image->Depth; |
||
393 | break; |
||
394 | } |
||
395 | } |
||
396 | |||
/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 *
 * Returns true when \p image's format and (minified) dimensions match
 * what the tree has stored for the image's level.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   gl_format mt_format = mt->format;

   /* The formats must match exactly for the image to share the tree. */
   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   /* A cube-map tree always carries six faces per level. */
   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
       mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      /* nonzero level here is always bogus */
      assert(level == 0);

      /* Multisample surfaces have a single level; compare against the
       * logical (unpadded) base dimensions.
       */
      if (width != mt->logical_width0 ||
          height != mt->logical_height0 ||
          depth != mt->logical_depth0) {
         return false;
      }
   }
   else {
      /* all normal textures, renderbuffers, etc */
      if (width != mt->level[level].width ||
          height != mt->level[level].height ||
          depth != mt->level[level].depth) {
         return false;
      }
   }

   return true;
}
||
453 | |||
454 | |||
/* Record the size and position-in-region of one mip level, and allocate
 * its per-slice offset table (slice 0 initialized to the level origin;
 * the other slices are filled in later via
 * intel_miptree_set_image_offset()).
 */
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
       level, w, h, d, x, y);

   /* Levels may only be initialized once. */
   assert(mt->level[level].slice == NULL);

   /* NOTE(review): the calloc() result is unchecked; a failed allocation
    * would crash on the slice[0] writes below.  Consider propagating the
    * failure.
    */
   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}
||
476 | |||
477 | |||
478 | void |
||
479 | intel_miptree_set_image_offset(struct intel_mipmap_tree *mt, |
||
480 | GLuint level, GLuint img, |
||
481 | GLuint x, GLuint y) |
||
482 | { |
||
483 | if (img == 0 && level == 0) |
||
484 | assert(x == 0 && y == 0); |
||
485 | |||
486 | assert(img < mt->level[level].depth); |
||
487 | |||
488 | mt->level[level].slice[img].x_offset = mt->level[level].level_x + x; |
||
489 | mt->level[level].slice[img].y_offset = mt->level[level].level_y + y; |
||
490 | |||
491 | DBG("%s level %d img %d pos %d,%d\n", |
||
492 | __FUNCTION__, level, img, |
||
493 | mt->level[level].slice[img].x_offset, |
||
494 | mt->level[level].slice[img].y_offset); |
||
495 | } |
||
496 | |||
497 | void |
||
498 | intel_miptree_get_image_offset(struct intel_mipmap_tree *mt, |
||
499 | GLuint level, GLuint slice, |
||
500 | GLuint *x, GLuint *y) |
||
501 | { |
||
502 | assert(slice < mt->level[level].depth); |
||
503 | |||
504 | *x = mt->level[level].slice[slice].x_offset; |
||
505 | *y = mt->level[level].slice[slice].y_offset; |
||
506 | } |
||
507 | |||
508 | /** |
||
509 | * Rendering with tiled buffers requires that the base address of the buffer |
||
510 | * be aligned to a page boundary. For renderbuffers, and sometimes with |
||
511 | * textures, we may want the surface to point at a texture image level that |
||
512 | * isn't at a page boundary. |
||
513 | * |
||
514 | * This function returns an appropriately-aligned base offset |
||
515 | * according to the tiling restrictions, plus any required x/y offset |
||
516 | * from there. |
||
517 | */ |
||
518 | uint32_t |
||
519 | intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt, |
||
520 | GLuint level, GLuint slice, |
||
521 | uint32_t *tile_x, |
||
522 | uint32_t *tile_y) |
||
523 | { |
||
524 | struct intel_region *region = mt->region; |
||
525 | uint32_t x, y; |
||
526 | uint32_t mask_x, mask_y; |
||
527 | |||
528 | intel_region_get_tile_masks(region, &mask_x, &mask_y, false); |
||
529 | intel_miptree_get_image_offset(mt, level, slice, &x, &y); |
||
530 | |||
531 | *tile_x = x & mask_x; |
||
532 | *tile_y = y & mask_y; |
||
533 | |||
534 | return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y, |
||
535 | false); |
||
536 | } |
||
537 | |||
538 | static void |
||
539 | intel_miptree_copy_slice_sw(struct intel_context *intel, |
||
540 | struct intel_mipmap_tree *dst_mt, |
||
541 | struct intel_mipmap_tree *src_mt, |
||
542 | int level, |
||
543 | int slice, |
||
544 | int width, |
||
545 | int height) |
||
546 | { |
||
547 | void *src, *dst; |
||
548 | int src_stride, dst_stride; |
||
549 | int cpp = dst_mt->cpp; |
||
550 | |||
551 | intel_miptree_map(intel, src_mt, |
||
552 | level, slice, |
||
553 | 0, 0, |
||
554 | width, height, |
||
555 | GL_MAP_READ_BIT, |
||
556 | &src, &src_stride); |
||
557 | |||
558 | intel_miptree_map(intel, dst_mt, |
||
559 | level, slice, |
||
560 | 0, 0, |
||
561 | width, height, |
||
562 | GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT, |
||
563 | &dst, &dst_stride); |
||
564 | |||
565 | DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n", |
||
566 | _mesa_get_format_name(src_mt->format), |
||
567 | src_mt, src, src_stride, |
||
568 | _mesa_get_format_name(dst_mt->format), |
||
569 | dst_mt, dst, dst_stride, |
||
570 | width, height); |
||
571 | |||
572 | int row_size = cpp * width; |
||
573 | if (src_stride == row_size && |
||
574 | dst_stride == row_size) { |
||
575 | memcpy(dst, src, row_size * height); |
||
576 | } else { |
||
577 | for (int i = 0; i < height; i++) { |
||
578 | memcpy(dst, src, row_size); |
||
579 | dst += dst_stride; |
||
580 | src += src_stride; |
||
581 | } |
||
582 | } |
||
583 | |||
584 | intel_miptree_unmap(intel, dst_mt, level, slice); |
||
585 | intel_miptree_unmap(intel, src_mt, level, slice); |
||
586 | } |
||
587 | |||
/* Copy one image between two same-format miptrees, preferring the
 * hardware blitter and falling back to a mapped CPU copy on failure.
 */
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)

{
   gl_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   /* A cube face and a 3D/array depth index are mutually exclusive;
    * whichever is nonzero selects the slice.
    */
   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   /* For compressed formats, convert texel dimensions to block rows and
    * aligned block widths before blitting.
    */
   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   /* Blitter first; if it can't handle this surface, do it on the CPU. */
   if (!intel_miptree_blit(intel,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}
||
637 | |||
638 | /** |
||
639 | * Copies the image's current data to the given miptree, and associates that |
||
640 | * miptree with the image. |
||
641 | * |
||
642 | * If \c invalidate is true, then the actual image data does not need to be |
||
643 | * copied, but the image still needs to be associated to the new miptree (this |
||
644 | * is set to true if we're about to clear the image). |
||
645 | */ |
||
646 | void |
||
647 | intel_miptree_copy_teximage(struct intel_context *intel, |
||
648 | struct intel_texture_image *intelImage, |
||
649 | struct intel_mipmap_tree *dst_mt, |
||
650 | bool invalidate) |
||
651 | { |
||
652 | struct intel_mipmap_tree *src_mt = intelImage->mt; |
||
653 | struct intel_texture_object *intel_obj = |
||
654 | intel_texture_object(intelImage->base.Base.TexObject); |
||
655 | int level = intelImage->base.Base.Level; |
||
656 | int face = intelImage->base.Base.Face; |
||
657 | GLuint depth = intelImage->base.Base.Depth; |
||
658 | |||
659 | if (!invalidate) { |
||
660 | for (int slice = 0; slice < depth; slice++) { |
||
661 | intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice); |
||
662 | } |
||
663 | } |
||
664 | |||
665 | intel_miptree_reference(&intelImage->mt, dst_mt); |
||
666 | intel_obj->needs_validate = true; |
||
667 | } |
||
668 | |||
669 | void * |
||
670 | intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt) |
||
671 | { |
||
672 | drm_intel_bo *bo = mt->region->bo; |
||
673 | |||
674 | if (unlikely(INTEL_DEBUG & DEBUG_PERF)) { |
||
675 | if (drm_intel_bo_busy(bo)) { |
||
676 | perf_debug("Mapping a busy BO, causing a stall on the GPU.\n"); |
||
677 | } |
||
678 | } |
||
679 | |||
680 | intel_flush(&intel->ctx); |
||
681 | |||
682 | if (mt->region->tiling != I915_TILING_NONE) |
||
683 | drm_intel_gem_bo_map_gtt(bo); |
||
684 | else |
||
685 | drm_intel_bo_map(bo, true); |
||
686 | |||
687 | return bo->virtual; |
||
688 | } |
||
689 | |||
690 | void |
||
691 | intel_miptree_unmap_raw(struct intel_context *intel, |
||
692 | struct intel_mipmap_tree *mt) |
||
693 | { |
||
694 | drm_intel_bo_unmap(mt->region->bo); |
||
695 | } |
||
696 | |||
/* Map a (level, slice) rectangle by mapping the whole BO and computing a
 * pointer/stride into it for the requested region.
 */
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   /* NOTE(review): mt->offset is added before the NULL check below, so a
    * failed raw map with a nonzero offset would slip past it — confirm
    * whether map_raw can actually return NULL here.
    */
   base = intel_miptree_map_raw(intel, mt) + mt->offset;

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}
||
738 | |||
/* Counterpart of intel_miptree_map_gtt(): a GTT map has no separate
 * temporary, so just drop the raw BO mapping.
 */
static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_miptree_unmap_raw(intel, mt);
}
||
748 | |||
/* Map a (level, slice) rectangle by blitting it into a small linear
 * temporary miptree and mapping that instead.  Used when the real
 * surface can't be mapped directly (see intel_miptree_map()).  On any
 * failure, map->ptr is left NULL and the temporary is released.
 */
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   /* Untiled, single-level temporary of just the mapped size. */
   map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false,
                                  INTEL_MIPTREE_TILING_NONE);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->region->pitch;

   if (!intel_miptree_blit(intel,
                           mt, level, slice,
                           map->x, map->y, false,
                           map->mt, 0, 0,
                           0, 0, false,
                           map->w, map->h, GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   /* Make sure the blit has actually executed before the CPU reads. */
   intel_batchbuffer_flush(intel);
   map->ptr = intel_miptree_map_raw(intel, map->mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __FUNCTION__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}
||
791 | |||
/* Counterpart of intel_miptree_map_blit(): blit written data back into
 * the real surface (for write maps) and release the linear temporary.
 */
static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   /* NOTE(review): ctx appears unused in this body unless the WARN_ONCE
    * macro expands to reference it — confirm before removing.
    */
   struct gl_context *ctx = &intel->ctx;

   intel_miptree_unmap_raw(intel, map->mt);

   /* Read-only maps need no writeback. */
   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(intel,
                                   map->mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->mt);
}
||
815 | |||
816 | /** |
||
817 | * Create and attach a map to the miptree at (level, slice). Return the |
||
818 | * attached map. |
||
819 | */ |
||
820 | static struct intel_miptree_map* |
||
821 | intel_miptree_attach_map(struct intel_mipmap_tree *mt, |
||
822 | unsigned int level, |
||
823 | unsigned int slice, |
||
824 | unsigned int x, |
||
825 | unsigned int y, |
||
826 | unsigned int w, |
||
827 | unsigned int h, |
||
828 | GLbitfield mode) |
||
829 | { |
||
830 | struct intel_miptree_map *map = calloc(1, sizeof(*map)); |
||
831 | |||
832 | if (!map) |
||
833 | return NULL; |
||
834 | |||
835 | assert(mt->level[level].slice[slice].map == NULL); |
||
836 | mt->level[level].slice[slice].map = map; |
||
837 | |||
838 | map->mode = mode; |
||
839 | map->x = x; |
||
840 | map->y = y; |
||
841 | map->w = w; |
||
842 | map->h = h; |
||
843 | |||
844 | return map; |
||
845 | } |
||
846 | |||
847 | /** |
||
848 | * Release the map at (level, slice). |
||
849 | */ |
||
850 | static void |
||
851 | intel_miptree_release_map(struct intel_mipmap_tree *mt, |
||
852 | unsigned int level, |
||
853 | unsigned int slice) |
||
854 | { |
||
855 | struct intel_miptree_map **map; |
||
856 | |||
857 | map = &mt->level[level].slice[slice].map; |
||
858 | free(*map); |
||
859 | *map = NULL; |
||
860 | } |
||
861 | |||
/* Map a rectangle of one (level, slice) image for CPU access.  On
 * success, *out_ptr / *out_stride describe the mapped pixels; on
 * failure, *out_ptr is NULL and *out_stride is 0.  Each successful map
 * must be balanced by intel_miptree_unmap().
 */
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   if (mt->region->tiling != I915_TILING_NONE &&
       mt->region->bo->size >= intel->max_gtt_map_object_size) {
      /* Too large to map directly: blit through a linear temporary. */
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   /* If the chosen path failed, drop the bookkeeping again. */
   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}
||
899 | |||
900 | void |
||
901 | intel_miptree_unmap(struct intel_context *intel, |
||
902 | struct intel_mipmap_tree *mt, |
||
903 | unsigned int level, |
||
904 | unsigned int slice) |
||
905 | { |
||
906 | struct intel_miptree_map *map = mt->level[level].slice[slice].map; |
||
907 | |||
908 | if (!map) |
||
909 | return; |
||
910 | |||
911 | DBG("%s: mt %p (%s) level %d slice %d\n", __FUNCTION__, |
||
912 | mt, _mesa_get_format_name(mt->format), level, slice); |
||
913 | |||
914 | if (map->mt) { |
||
915 | intel_miptree_unmap_blit(intel, mt, map, level, slice); |
||
916 | } else { |
||
917 | intel_miptree_unmap_gtt(intel, mt, map, level, slice); |
||
918 | } |
||
919 | |||
920 | intel_miptree_release_map(mt, level, slice); |
||
921 | }>>>>>>>=>>--> |