/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */

#include "sid.h"
#include "si_pipe.h"
#include "radeon/r600_cs.h"

#include "util/u_format.h"

static unsigned si_array_mode(unsigned mode)
{
        switch (mode) {
        case RADEON_SURF_MODE_LINEAR_ALIGNED:
                return V_009910_ARRAY_LINEAR_ALIGNED;
        case RADEON_SURF_MODE_1D:
                return V_009910_ARRAY_1D_TILED_THIN1;
        case RADEON_SURF_MODE_2D:
                return V_009910_ARRAY_2D_TILED_THIN1;
        default:
        case RADEON_SURF_MODE_LINEAR:
                return V_009910_ARRAY_LINEAR_GENERAL;
        }
}

static uint32_t si_micro_tile_mode(struct si_screen *sscreen, unsigned tile_mode)
{
        if (sscreen->b.info.si_tile_mode_array_valid) {
                uint32_t gb_tile_mode = sscreen->b.info.si_tile_mode_array[tile_mode];

                return G_009910_MICRO_TILE_MODE(gb_tile_mode);
        }

        /* The kernel cannot return the tile mode array, so make a guess. */
        return V_009910_ADDR_SURF_THIN_MICRO_TILING;
}

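/* Copy 'size' bytes between two buffer resources on the asynchronous DMA
 * ring. The offsets are relative to each resource and are converted to GPU
 * addresses below; the copy is emitted as one or more SI DMA copy packets.
 */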
static void si_dma_copy_buffer(struct si_context *ctx,
                                struct pipe_resource *dst,
                                struct pipe_resource *src,
                                uint64_t dst_offset,
                                uint64_t src_offset,
                                uint64_t size)
{
        struct radeon_winsys_cs *cs = ctx->b.rings.dma.cs;
        unsigned i, ncopy, csize, max_csize, sub_cmd, shift;
        struct r600_resource *rdst = (struct r600_resource*)dst;
        struct r600_resource *rsrc = (struct r600_resource*)src;

        /* Mark the destination buffer range as valid (initialized), so that
         * transfer_map knows it should wait for the GPU when mapping
         * that range. */
        util_range_add(&rdst->valid_buffer_range, dst_offset,
                       dst_offset + size);

        dst_offset += rdst->gpu_address;
        src_offset += rsrc->gpu_address;

        /* Use the dword copy sub-command when offsets and size are all
         * dword-aligned; otherwise fall back to a byte copy. */
        if (!(dst_offset % 4) && !(src_offset % 4) && !(size % 4)) {
                size >>= 2;
                sub_cmd = SI_DMA_COPY_DWORD_ALIGNED;
                shift = 2;
                max_csize = SI_DMA_COPY_MAX_SIZE_DW;
        } else {
                sub_cmd = SI_DMA_COPY_BYTE_ALIGNED;
                shift = 0;
                max_csize = SI_DMA_COPY_MAX_SIZE;
        }
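        /* Split the copy into chunks of at most max_csize units (dwords or
         * bytes, depending on the sub-command chosen above); each chunk costs
         * 5 dwords of DMA command stream space.
         */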
        ncopy = (size / max_csize) + !!(size % max_csize);

        r600_need_dma_space(&ctx->b, ncopy * 5);

        r600_context_bo_reloc(&ctx->b, &ctx->b.rings.dma, rsrc, RADEON_USAGE_READ,
                              RADEON_PRIO_MIN);
        r600_context_bo_reloc(&ctx->b, &ctx->b.rings.dma, rdst, RADEON_USAGE_WRITE,
                              RADEON_PRIO_MIN);

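        /* Each packet is: header (opcode, sub-command, count), the low 32 bits
         * of the destination and source addresses, then the high 8 bits of
         * each address.
         */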
        for (i = 0; i < ncopy; i++) {
                csize = size < max_csize ? size : max_csize;
                cs->buf[cs->cdw++] = SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd, csize);
                cs->buf[cs->cdw++] = dst_offset & 0xffffffff;
                cs->buf[cs->cdw++] = src_offset & 0xffffffff;
                cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
                cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
                dst_offset += csize << shift;
                src_offset += csize << shift;
                size -= csize;
        }
}

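/* Copy between a linear surface and a tiled surface using the DMA tiled-copy
 * packet. The two surfaces must use different array modes; 'detile' selects
 * the direction: it is set when the destination is linear (tiled -> linear)
 * and clear otherwise.
 */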
static void si_dma_copy_tile(struct si_context *ctx,
                             struct pipe_resource *dst,
                             unsigned dst_level,
                             unsigned dst_x,
                             unsigned dst_y,
                             unsigned dst_z,
                             struct pipe_resource *src,
                             unsigned src_level,
                             unsigned src_x,
                             unsigned src_y,
                             unsigned src_z,
                             unsigned copy_height,
                             unsigned pitch,
                             unsigned bpp)
{
        struct radeon_winsys_cs *cs = ctx->b.rings.dma.cs;
        struct si_screen *sscreen = ctx->screen;
        struct r600_texture *rsrc = (struct r600_texture*)src;
        struct r600_texture *rdst = (struct r600_texture*)dst;
        struct r600_texture *rlinear, *rtiled;
        unsigned linear_lvl, tiled_lvl;
        unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size;
        unsigned ncopy, height, cheight, detile, i, src_mode, dst_mode;
        unsigned linear_x, linear_y, linear_z, tiled_x, tiled_y, tiled_z;
        unsigned sub_cmd, bank_h, bank_w, mt_aspect, nbanks, tile_split, mt;
        uint64_t base, addr;
        unsigned pipe_config, tile_mode_index;

        dst_mode = rdst->surface.level[dst_level].mode;
        src_mode = rsrc->surface.level[src_level].mode;
        /* Downcast linear-aligned to linear to simplify the checks below. */
        src_mode = src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : src_mode;
        dst_mode = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : dst_mode;
        assert(dst_mode != src_mode);

        sub_cmd = SI_DMA_COPY_TILED;
        lbpp = util_logbase2(bpp);
        pitch_tile_max = ((pitch / bpp) / 8) - 1;

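        /* detile is set when the destination is linear, i.e. we copy from the
         * tiled surface to the linear one; the linear and tiled coordinates
         * below are selected accordingly.
         */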
        detile = dst_mode == RADEON_SURF_MODE_LINEAR;
        rlinear = detile ? rdst : rsrc;
        rtiled = detile ? rsrc : rdst;
        linear_lvl = detile ? dst_level : src_level;
        tiled_lvl = detile ? src_level : dst_level;
        linear_x = detile ? dst_x : src_x;
        linear_y = detile ? dst_y : src_y;
        linear_z = detile ? dst_z : src_z;
        tiled_x = detile ? src_x : dst_x;
        tiled_y = detile ? src_y : dst_y;
        tiled_z = detile ? src_z : dst_z;

        assert(!util_format_is_depth_and_stencil(rtiled->resource.b.b.format));

        array_mode = si_array_mode(rtiled->surface.level[tiled_lvl].mode);
        slice_tile_max = (rtiled->surface.level[tiled_lvl].nblk_x *
                          rtiled->surface.level[tiled_lvl].nblk_y) / (8*8) - 1;
        /* The linear height must match the slice tile max height. This is fine
         * even if the linear destination/source has a smaller height, because
         * the DMA packet size is based on copy_height, which is always less
         * than or equal to the linear height.
         */
        height = rtiled->surface.level[tiled_lvl].nblk_y;
        base = rtiled->surface.level[tiled_lvl].offset;
        addr = rlinear->surface.level[linear_lvl].offset;
        addr += rlinear->surface.level[linear_lvl].slice_size * linear_z;
        addr += linear_y * pitch + linear_x * bpp;
        bank_h = cik_bank_wh(rtiled->surface.bankh);
        bank_w = cik_bank_wh(rtiled->surface.bankw);
        mt_aspect = cik_macro_tile_aspect(rtiled->surface.mtilea);
        tile_split = cik_tile_split(rtiled->surface.tile_split);
        tile_mode_index = si_tile_mode_index(rtiled, tiled_lvl, false);
        nbanks = si_num_banks(sscreen, rtiled);
        base += rtiled->resource.gpu_address;
        addr += rlinear->resource.gpu_address;

        pipe_config = cik_db_pipe_config(sscreen, tile_mode_index);
        mt = si_micro_tile_mode(sscreen, tile_mode_index);
        size = (copy_height * pitch) / 4;
        ncopy = (size / SI_DMA_COPY_MAX_SIZE_DW) + !!(size % SI_DMA_COPY_MAX_SIZE_DW);
        r600_need_dma_space(&ctx->b, ncopy * 9);

        r600_context_bo_reloc(&ctx->b, &ctx->b.rings.dma, &rsrc->resource,
                              RADEON_USAGE_READ, RADEON_PRIO_MIN);
        r600_context_bo_reloc(&ctx->b, &ctx->b.rings.dma, &rdst->resource,
                              RADEON_USAGE_WRITE, RADEON_PRIO_MIN);

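        /* Each tiled-copy packet is 9 dwords: header, tiled base address,
         * tiling parameters, pitch/height, slice/pipe config, tiled x/z,
         * tiled y plus tile split, bank count and micro-tile mode, and the
         * linear address (low 32 bits, then high 8 bits). copy_height is
         * split into chunks so that each packet stays within
         * SI_DMA_COPY_MAX_SIZE_DW dwords.
         */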
        for (i = 0; i < ncopy; i++) {
                cheight = copy_height;
                if (((cheight * pitch) / 4) > SI_DMA_COPY_MAX_SIZE_DW) {
                        cheight = (SI_DMA_COPY_MAX_SIZE_DW * 4) / pitch;
                }
                size = (cheight * pitch) / 4;
                cs->buf[cs->cdw++] = SI_DMA_PACKET(SI_DMA_PACKET_COPY, sub_cmd, size);
                cs->buf[cs->cdw++] = base >> 8;
                cs->buf[cs->cdw++] = (detile << 31) | (array_mode << 27) |
                                        (lbpp << 24) | (bank_h << 21) |
                                        (bank_w << 18) | (mt_aspect << 16);
                cs->buf[cs->cdw++] = (pitch_tile_max << 0) | ((height - 1) << 16);
                cs->buf[cs->cdw++] = (slice_tile_max << 0) | (pipe_config << 26);
                cs->buf[cs->cdw++] = (tiled_x << 0) | (tiled_z << 18);
                cs->buf[cs->cdw++] = (tiled_y << 0) | (tile_split << 21) | (nbanks << 25) | (mt << 27);
                cs->buf[cs->cdw++] = addr & 0xfffffffc;
                cs->buf[cs->cdw++] = (addr >> 32UL) & 0xff;
                copy_height -= cheight;
                addr += cheight * pitch;
                tiled_y += cheight;
        }
}

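/* Try to perform the copy on the asynchronous DMA ring; anything the DMA
 * paths cannot handle falls back to si_resource_copy_region.
 */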
void si_dma_copy(struct pipe_context *ctx,
                 struct pipe_resource *dst,
                 unsigned dst_level,
                 unsigned dstx, unsigned dsty, unsigned dstz,
                 struct pipe_resource *src,
                 unsigned src_level,
                 const struct pipe_box *src_box)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct r600_texture *rsrc = (struct r600_texture*)src;
        struct r600_texture *rdst = (struct r600_texture*)dst;
        unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode;
        unsigned src_w, dst_w;
        unsigned src_x, src_y;
        unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz;

        if (sctx->b.rings.dma.cs == NULL) {
                goto fallback;
        }

        /* TODO: Implement DMA copy for CIK */
        if (sctx->b.chip_class >= CIK) {
                goto fallback;
        }

        if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
                si_dma_copy_buffer(sctx, dst, src, dst_x, src_box->x, src_box->width);
                return;
        }

        /* XXX: Using the asynchronous DMA engine for multi-dimensional
         * operations seems to cause random GPU lockups for various people.
         * While the root cause for this might need to be fixed in the kernel,
         * let's disable it for now.
         *
         * Before re-enabling this, please make sure you can hit all newly
         * enabled paths in your testing, preferably with both piglit and real
         * world apps, and get in touch with people on the bug reports below
         * for stability testing.
         *
         * https://bugs.freedesktop.org/show_bug.cgi?id=85647
         * https://bugs.freedesktop.org/show_bug.cgi?id=83500
         */
        goto fallback;

        if (src->format != dst->format || src_box->depth > 1 ||
            rdst->dirty_level_mask != 0 ||
            rdst->cmask.size || rdst->fmask.size ||
            rsrc->cmask.size || rsrc->fmask.size) {
                goto fallback;
        }

        if (rsrc->dirty_level_mask) {
                ctx->flush_resource(ctx, src);
        }

        src_x = util_format_get_nblocksx(src->format, src_box->x);
        dst_x = util_format_get_nblocksx(src->format, dst_x);
        src_y = util_format_get_nblocksy(src->format, src_box->y);
        dst_y = util_format_get_nblocksy(src->format, dst_y);

        bpp = rdst->surface.bpe;
        dst_pitch = rdst->surface.level[dst_level].pitch_bytes;
        src_pitch = rsrc->surface.level[src_level].pitch_bytes;
        src_w = rsrc->surface.level[src_level].npix_x;
        dst_w = rdst->surface.level[dst_level].npix_x;

        dst_mode = rdst->surface.level[dst_level].mode;
        src_mode = rsrc->surface.level[src_level].mode;
        /* Downcast linear-aligned to linear to simplify the checks below. */
        src_mode = src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : src_mode;
        dst_mode = dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED ? RADEON_SURF_MODE_LINEAR : dst_mode;

        if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w ||
            src_box->width != src_w ||
            src_box->height != rsrc->surface.level[src_level].npix_y ||
            src_box->height != rdst->surface.level[dst_level].npix_y ||
            rsrc->surface.level[src_level].nblk_y !=
            rdst->surface.level[dst_level].nblk_y) {
                /* FIXME: SI can do partial blits. */
                goto fallback;
        }
        /* The x tests here are currently useless (we don't support partial
         * blits), but keep them around so we don't forget about them.
         */
        if ((src_pitch % 8) || (src_box->x % 8) || (dst_x % 8) ||
            (src_box->y % 8) || (dst_y % 8) || (src_box->height % 8)) {
                goto fallback;
        }

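        /* When both surfaces use the same array mode, the whole slice can be
         * copied as one linear buffer copy; otherwise go through the tiled
         * copy path, which handles (de)tiling.
         */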
        if (src_mode == dst_mode) {
                uint64_t dst_offset, src_offset;
                /* A simple DMA blit will do. NOTE: the code here assumes:
                 *   src_box.x/y == 0
                 *   dst_x/y == 0
                 *   dst_pitch == src_pitch
                 */
                src_offset = rsrc->surface.level[src_level].offset;
                src_offset += rsrc->surface.level[src_level].slice_size * src_box->z;
                src_offset += src_y * src_pitch + src_x * bpp;
                dst_offset = rdst->surface.level[dst_level].offset;
                dst_offset += rdst->surface.level[dst_level].slice_size * dst_z;
                dst_offset += dst_y * dst_pitch + dst_x * bpp;
                si_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset,
                                   rsrc->surface.level[src_level].slice_size);
        } else {
                si_dma_copy_tile(sctx, dst, dst_level, dst_x, dst_y, dst_z,
                                 src, src_level, src_x, src_y, src_box->z,
                                 src_box->height / rsrc->surface.blk_h,
                                 dst_pitch, bpp);
        }
        return;

fallback:
        si_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
                                src, src_level, src_box);
}