/**************************************************************************
 *
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <GL/gl.h>
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

/**
 * Map the six per-face cube map targets to the cube map target itself;
 * all other targets pass through unchanged.
 */
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

/**
 * @param for_bo Indicates that the caller is
 *        intel_miptree_create_for_bo(). If true, then do not create
 *        \c stencil_mt.
 */
struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
                            GLenum target,
                            mesa_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0,
                            bool for_bo)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __func__,
       _mesa_lookup_enum_by_nr(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;

   /* The cpp is bytes per (1, blockheight)-sized block for compressed
    * textures.  This is why you'll see divides by blockheight all over.
    */
   unsigned bw, bh;
   _mesa_get_format_block_size(format, &bw, &bh);
   assert(_mesa_get_format_bytes(mt->format) % bw == 0);
   mt->cpp = _mesa_get_format_bytes(mt->format) / bw;

   mt->compressed = _mesa_is_format_compressed(format);
   mt->refcount = 1;

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);

   return mt;
}
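
/* Worked example of the block-size math above (illustrative note, not part
 * of the original file): for a DXT1/BC1 texture, _mesa_get_format_block_size()
 * reports a 4x4 block and _mesa_get_format_bytes() reports 8 bytes per block,
 * so mt->cpp = 8 / 4 = 2 bytes per 1x4 column of blocks.  The byte pitch of
 * one row of blocks is then width * cpp, the same width-times-cpp formula the
 * uncompressed paths use, which is exactly what the "divides by blockheight"
 * convention is buying.
 */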

/**
 * \brief Helper function for intel_miptree_create().
 */
static uint32_t
intel_miptree_choose_tiling(struct intel_context *intel,
                            mesa_format format,
                            uint32_t width0,
                            enum intel_miptree_tiling_mode requested,
                            struct intel_mipmap_tree *mt)
{
   /* Some usages may want only one type of tiling, like depth miptrees (Y
    * tiled), or temporary BOs for uploading data once (linear).
    */
   switch (requested) {
   case INTEL_MIPTREE_TILING_ANY:
      break;
   case INTEL_MIPTREE_TILING_Y:
      return I915_TILING_Y;
   case INTEL_MIPTREE_TILING_NONE:
      return I915_TILING_NONE;
   }

   int minimum_pitch = mt->total_width * mt->cpp;

   /* If the width is much smaller than a tile, don't bother tiling. */
   if (minimum_pitch < 64)
      return I915_TILING_NONE;

   if (ALIGN(minimum_pitch, 512) >= 32768) {
      perf_debug("%dx%d miptree too large to blit, falling back to untiled",
                 mt->total_width, mt->total_height);
      return I915_TILING_NONE;
   }

   /* We don't have BLORP to handle Y-tiled blits, so use X-tiling. */
   return I915_TILING_X;
}
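
/* Illustrative note (not part of the original file): the 32768 limit above
 * exists because the blitter's pitch is a signed 16-bit field, so pitches of
 * 32768 bytes or more cannot be expressed.  For example, an 8192-pixel-wide
 * RGBA8888 surface needs a pitch of 8192 * 4 = 32768 bytes;
 * ALIGN(32768, 512) == 32768, so such a surface is allocated untiled, which
 * the CPU mapping paths can still handle even though the blitter cannot.
 */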

struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     mesa_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     enum intel_miptree_tiling_mode requested_tiling)
{
   struct intel_mipmap_tree *mt;
   GLuint total_width, total_height;

   mt = intel_miptree_create_layout(intel, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0,
                                    false);
   /*
    * pitch == 0 || height == 0  indicates the null texture
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
                                                 requested_tiling,
                                                 mt);
   /* Note: intel_miptree_choose_tiling() above never returns the combined
    * (Y | X) value in this driver, so y_or_x is always false here.
    */
   bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);

   mt->region = intel_region_alloc(intel->intelScreen,
                                   y_or_x ? I915_TILING_Y : tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   /* If the region is too large to fit in the aperture, we need to use the
    * BLT engine to support it.  The BLT paths can't currently handle Y-tiling,
    * so we need to fall back to X.
    */
   if (y_or_x && mt->region &&
       mt->region->bo->size >= intel->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);
      intel_region_release(&mt->region);

      mt->region = intel_region_alloc(intel->intelScreen,
                                      I915_TILING_X,
                                      mt->cpp,
                                      total_width,
                                      total_height,
                                      expect_accelerated_upload);
   }

   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}
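
/* Usage sketch (illustrative, not part of the original file; assumes an
 * initialized struct intel_context *intel): allocate a complete mipmap
 * chain, use it, then drop the reference.
 *
 *    struct intel_mipmap_tree *tex_mt =
 *       intel_miptree_create(intel, GL_TEXTURE_2D,
 *                            MESA_FORMAT_B8G8R8A8_UNORM,
 *                            0, 8,            - levels 0..8 of a 256x256 tex
 *                            256, 256, 1,
 *                            true,            - expect accelerated upload
 *                            INTEL_MIPTREE_TILING_ANY);
 *    if (tex_mt) {
 *       ... draw from it, map it with intel_miptree_map(), etc. ...
 *       intel_miptree_release(&tex_mt);
 *    }
 */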

struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct intel_context *intel,
                            drm_intel_bo *bo,
                            mesa_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            int pitch,
                            uint32_t tiling)
{
   struct intel_mipmap_tree *mt;

   struct intel_region *region = calloc(1, sizeof(*region));
   if (!region)
      return NULL;

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch.  If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
                                    0, 0,
                                    width, height, 1,
                                    true);
   if (!mt) {
      free(region);
      return mt;
   }

   region->cpp = mt->cpp;
   region->width = width;
   region->height = height;
   region->pitch = pitch;
   region->refcount = 1;
   drm_intel_bo_reference(bo);
   region->bo = bo;
   region->tiling = tiling;

   mt->region = region;
   mt->offset = offset;

   return mt;
}
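
/* Illustrative note (not part of the original file): the 4096-byte alignment
 * asserted above matches the 4 KiB GTT page size; tiling fences and surface
 * base addresses operate on whole pages, so a tiled image that starts
 * mid-page cannot be addressed correctly by the hardware.
 */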

/**
 * For a singlesample DRI2 buffer, this simply wraps the given region with a
 * miptree.
 *
 * This driver does not support multisampling, so no multisample miptree is
 * created around the singlesample one here.
 */
struct intel_mipmap_tree*
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     mesa_format format,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   if (!mt)
      return NULL;
   mt->region->name = region->name;

   return mt;
}

/**
 * For a singlesample image buffer, this simply wraps the given region with a
 * miptree.
 *
 * This driver does not support multisampling, so \c num_samples is unused
 * and no multisample miptree is created around the singlesample one.
 */
struct intel_mipmap_tree*
intel_miptree_create_for_image_buffer(struct intel_context *intel,
                                      enum __DRIimageBufferMask buffer_type,
                                      mesa_format format,
                                      uint32_t num_samples,
                                      struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   return mt;
}

struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      mesa_format format,
                                      uint32_t width,
                                      uint32_t height)
{
   uint32_t depth = 1;

   return intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                               width, height, depth, true,
                               INTEL_MIPTREE_TILING_ANY);
}

void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __func__, src, src->refcount);
   }

   *dst = src;
}

void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __func__, *mt);

      intel_region_release(&((*mt)->region));

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}
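
/* Illustrative note on the refcounting above (not part of the original
 * file): both functions take a pointer-to-pointer so the caller's reference
 * variable is updated in the same step as the refcount, e.g.
 *
 *    struct intel_mipmap_tree *copy = NULL;
 *    intel_miptree_reference(&copy, mt);   - mt->refcount++
 *    ...
 *    intel_miptree_release(&copy);         - refcount--, copy becomes NULL
 */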

void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}
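
/* Illustrative example (not part of the original file): GL stores the slice
 * count of a 1D array texture in image->Height, so a GL_TEXTURE_1D_ARRAY
 * image with Width == 128 and Height == 16 comes back from this helper as
 * width 128, height 1, depth 16, matching the miptree's slice layout.
 */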

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   mesa_format mt_format = mt->format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
       mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      /* nonzero level here is always bogus */
      assert(level == 0);

      if (width != mt->logical_width0 ||
          height != mt->logical_height0 ||
          depth != mt->logical_depth0) {
         return false;
      }
   } else {
      /* all normal textures, renderbuffers, etc */
      if (width != mt->level[level].width ||
          height != mt->level[level].height ||
          depth != mt->level[level].depth) {
         return false;
      }
   }

   return true;
}

void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __func__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}

void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __func__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   struct intel_region *region = mt->region;
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y,
                                          false);
}
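
/* Worked example (illustrative, not part of the original file): an X tile
 * spans 512 bytes by 8 rows, so for a 4-byte-per-pixel X-tiled region
 * intel_region_get_tile_masks() yields mask_x == 127 and mask_y == 7.  An
 * image located at pixel (300, 37) then maps to tile_x == 44, tile_y == 5,
 * with the returned base offset addressing the tile-aligned pixel (256, 32).
 */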

static void
intel_miptree_copy_slice_sw(struct intel_context *intel,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   int src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(intel, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT,
                     &src, &src_stride);

   intel_miptree_map(intel, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   /* If both images are packed tight, copy them in a single memcpy;
    * otherwise copy row by row, stepping by each image's stride.
    */
   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(intel, dst_mt, level, slice);
   intel_miptree_unmap(intel, src_mt, level, slice);
}

static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   mesa_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   /* A positive face selects a cube map face; otherwise the slice is the
    * depth/array index.
    */
   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   if (!intel_miptree_blit(intel,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}

void *
intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
   drm_intel_bo *bo = mt->region->bo;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(bo)) {
         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
      }
   }

   intel_flush(&intel->ctx);

   if (mt->region->tiling != I915_TILING_NONE)
      drm_intel_gem_bo_map_gtt(bo);
   else
      drm_intel_bo_map(bo, true);

   return bo->virtual;
}
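
/* Illustrative note (not part of the original file): tiled BOs are mapped
 * through the GTT so the CPU sees the fence-detiled, linear view of the
 * surface (at the cost of uncached aperture access), while linear BOs can be
 * mapped directly.  In both cases libdrm leaves the pointer in bo->virtual,
 * and drm_intel_bo_unmap() tears down either kind of map.
 */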

void
intel_miptree_unmap_raw(struct intel_context *intel,
                        struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->region->bo);
}

static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(intel, mt);

   if (base == NULL)
      map->ptr = NULL;
   else {
      base += mt->offset;

      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_context *intel,
                        struct intel_mipmap_tree *mt,
                        struct intel_miptree_map *map,
                        unsigned int level,
                        unsigned int slice)
{
   intel_miptree_unmap_raw(intel, mt);
}

static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false,
                                  INTEL_MIPTREE_TILING_NONE);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->region->pitch;

   if (!intel_miptree_blit(intel,
                           mt, level, slice,
                           map->x, map->y, false,
                           map->mt, 0, 0,
                           0, 0, false,
                           map->w, map->h, GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   map->ptr = intel_miptree_map_raw(intel, map->mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &intel->ctx;

   intel_miptree_unmap_raw(intel, map->mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(intel,
                                   map->mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->mt);
}

/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map*
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}

void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   if (mt->region->tiling != I915_TILING_NONE &&
       mt->region->bo->size >= intel->max_gtt_map_object_size) {
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}
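
/* Usage sketch (illustrative, not part of the original file): maps must be
 * paired with unmaps per (level, slice).  For example, to read back a 16x16
 * block from the top-left of level 2, slice 0:
 *
 *    void *ptr;
 *    int stride;
 *    intel_miptree_map(intel, mt, 2, 0, 0, 0, 16, 16,
 *                      GL_MAP_READ_BIT, &ptr, &stride);
 *    if (ptr) {
 *       ... read 16 rows of 16 * mt->cpp bytes, stepping by stride ...
 *       intel_miptree_unmap(intel, mt, 2, 0);
 *    }
 */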

void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (map->mt) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(intel, mt, map, level, slice);
   }

   intel_miptree_release_map(mt, level, slice);
}