Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /**********************************************************
  2.  * Copyright 2008-2009 VMware, Inc.  All rights reserved.
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person
  5.  * obtaining a copy of this software and associated documentation
  6.  * files (the "Software"), to deal in the Software without
  7.  * restriction, including without limitation the rights to use, copy,
  8.  * modify, merge, publish, distribute, sublicense, and/or sell copies
  9.  * of the Software, and to permit persons to whom the Software is
  10.  * furnished to do so, subject to the following conditions:
  11.  *
  12.  * The above copyright notice and this permission notice shall be
  13.  * included in all copies or substantial portions of the Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  16.  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  17.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  18.  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  19.  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  20.  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  21.  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  22.  * SOFTWARE.
  23.  *
  24.  **********************************************************/
  25.  
  26. #include "svga3d_reg.h"
  27. #include "svga3d_surfacedefs.h"
  28.  
  29. #include "pipe/p_state.h"
  30. #include "pipe/p_defines.h"
  31. #include "os/os_thread.h"
  32. #include "util/u_format.h"
  33. #include "util/u_inlines.h"
  34. #include "util/u_math.h"
  35. #include "util/u_memory.h"
  36. #include "util/u_resource.h"
  37.  
  38. #include "svga_cmd.h"
  39. #include "svga_format.h"
  40. #include "svga_screen.h"
  41. #include "svga_context.h"
  42. #include "svga_resource_texture.h"
  43. #include "svga_resource_buffer.h"
  44. #include "svga_sampler_view.h"
  45. #include "svga_winsys.h"
  46. #include "svga_debug.h"
  47.  
  48.  
/* XXX: This isn't a real hardware flag, but just a hack for kernel to
 * know about primary surfaces. Find a better way to accomplish this.
 * NOTE(review): bit 9 is assumed not to collide with any real
 * SVGA3D_SURFACE_* flag in svga3d_reg.h -- confirm when updating headers.
 */
#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
  53.  
  54.  
  55. static void
  56. svga_transfer_dma_band(struct svga_context *svga,
  57.                        struct svga_transfer *st,
  58.                        SVGA3dTransferType transfer,
  59.                        unsigned y, unsigned h, unsigned srcy,
  60.                        SVGA3dSurfaceDMAFlags flags)
  61. {
  62.    struct svga_texture *texture = svga_texture(st->base.resource);
  63.    SVGA3dCopyBox box;
  64.    enum pipe_error ret;
  65.  
  66.    assert(!st->use_direct_map);
  67.  
  68.    box.x = st->base.box.x;
  69.    box.y = y;
  70.    box.z = st->base.box.z;
  71.    box.w = st->base.box.width;
  72.    box.h = h;
  73.    box.d = 1;
  74.    box.srcx = 0;
  75.    box.srcy = srcy;
  76.    box.srcz = 0;
  77.  
  78.    if (st->base.resource->target == PIPE_TEXTURE_CUBE) {
  79.       st->face = st->base.box.z;
  80.       box.z = 0;
  81.    }
  82.    else
  83.       st->face = 0;
  84.  
  85.    SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - (%u, %u, %u), %ubpp\n",
  86.                 transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
  87.                 texture->handle,
  88.                 st->face,
  89.                 st->base.box.x,
  90.                 y,
  91.                 box.z,
  92.                 st->base.box.x + st->base.box.width,
  93.                 y + h,
  94.                 box.z + 1,
  95.                 util_format_get_blocksize(texture->b.b.format) * 8 /
  96.                 (util_format_get_blockwidth(texture->b.b.format)*util_format_get_blockheight(texture->b.b.format)));
  97.  
  98.    ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
  99.    if(ret != PIPE_OK) {
  100.       svga_context_flush(svga, NULL);
  101.       ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
  102.       assert(ret == PIPE_OK);
  103.    }
  104. }
  105.  
  106.  
  107. static void
  108. svga_transfer_dma(struct svga_context *svga,
  109.                   struct svga_transfer *st,
  110.                   SVGA3dTransferType transfer,
  111.                   SVGA3dSurfaceDMAFlags flags)
  112. {
  113.    struct svga_texture *texture = svga_texture(st->base.resource);
  114.    struct svga_screen *screen = svga_screen(texture->b.b.screen);
  115.    struct svga_winsys_screen *sws = screen->sws;
  116.    struct pipe_fence_handle *fence = NULL;
  117.  
  118.    assert(!st->use_direct_map);
  119.  
  120.    if (transfer == SVGA3D_READ_HOST_VRAM) {
  121.       SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
  122.    }
  123.  
  124.    /* Ensure any pending operations on host surfaces are queued on the command
  125.     * buffer first.
  126.     */
  127.    svga_surfaces_flush( svga );
  128.  
  129.    if(!st->swbuf) {
  130.       /* Do the DMA transfer in a single go */
  131.  
  132.       svga_transfer_dma_band(svga, st, transfer,
  133.                              st->base.box.y, st->base.box.height, 0,
  134.                              flags);
  135.  
  136.       if(transfer == SVGA3D_READ_HOST_VRAM) {
  137.          svga_context_flush(svga, &fence);
  138.          sws->fence_finish(sws, fence, 0);
  139.          sws->fence_reference(sws, &fence, NULL);
  140.       }
  141.    }
  142.    else {
  143.       int y, h, srcy;
  144.       unsigned blockheight = util_format_get_blockheight(st->base.resource->format);
  145.       h = st->hw_nblocksy * blockheight;
  146.       srcy = 0;
  147.       for(y = 0; y < st->base.box.height; y += h) {
  148.          unsigned offset, length;
  149.          void *hw, *sw;
  150.  
  151.          if (y + h > st->base.box.height)
  152.             h = st->base.box.height - y;
  153.  
  154.          /* Transfer band must be aligned to pixel block boundaries */
  155.          assert(y % blockheight == 0);
  156.          assert(h % blockheight == 0);
  157.  
  158.          offset = y * st->base.stride / blockheight;
  159.          length = h * st->base.stride / blockheight;
  160.  
  161.          sw = (uint8_t *)st->swbuf + offset;
  162.  
  163.          if (transfer == SVGA3D_WRITE_HOST_VRAM) {
  164.             unsigned usage = PIPE_TRANSFER_WRITE;
  165.  
  166.             /* Wait for the previous DMAs to complete */
  167.             /* TODO: keep one DMA (at half the size) in the background */
  168.             if (y) {
  169.                svga_context_flush(svga, NULL);
  170.                usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
  171.             }
  172.  
  173.             hw = sws->buffer_map(sws, st->hwbuf, usage);
  174.             assert(hw);
  175.             if (hw) {
  176.                memcpy(hw, sw, length);
  177.                sws->buffer_unmap(sws, st->hwbuf);
  178.             }
  179.          }
  180.  
  181.          svga_transfer_dma_band(svga, st, transfer, y, h, srcy, flags);
  182.  
  183.          /*
  184.           * Prevent the texture contents to be discarded on the next band
  185.           * upload.
  186.           */
  187.  
  188.          flags.discard = FALSE;
  189.  
  190.          if(transfer == SVGA3D_READ_HOST_VRAM) {
  191.             svga_context_flush(svga, &fence);
  192.             sws->fence_finish(sws, fence, 0);
  193.  
  194.             hw = sws->buffer_map(sws, st->hwbuf, PIPE_TRANSFER_READ);
  195.             assert(hw);
  196.             if(hw) {
  197.                memcpy(sw, hw, length);
  198.                sws->buffer_unmap(sws, st->hwbuf);
  199.             }
  200.          }
  201.       }
  202.    }
  203. }
  204.  
  205.  
  206. static boolean
  207. svga_texture_get_handle(struct pipe_screen *screen,
  208.                                struct pipe_resource *texture,
  209.                                struct winsys_handle *whandle)
  210. {
  211.    struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
  212.    unsigned stride;
  213.  
  214.    assert(svga_texture(texture)->key.cachable == 0);
  215.    svga_texture(texture)->key.cachable = 0;
  216.    stride = util_format_get_nblocksx(texture->format, texture->width0) *
  217.             util_format_get_blocksize(texture->format);
  218.    return sws->surface_get_handle(sws, svga_texture(texture)->handle, stride, whandle);
  219. }
  220.  
  221.  
  222. static void
  223. svga_texture_destroy(struct pipe_screen *screen,
  224.                      struct pipe_resource *pt)
  225. {
  226.    struct svga_screen *ss = svga_screen(screen);
  227.    struct svga_texture *tex = svga_texture(pt);
  228.  
  229.    ss->texture_timestamp++;
  230.  
  231.    svga_sampler_view_reference(&tex->cached_view, NULL);
  232.  
  233.    /*
  234.      DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
  235.    */
  236.    SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
  237.    svga_screen_surface_destroy(ss, &tex->key, &tex->handle);
  238.  
  239.    ss->total_resource_bytes -= tex->size;
  240.  
  241.    FREE(tex->rendered_to);
  242.    FREE(tex);
  243. }
  244.  
  245.  
  246. /**
  247.  * Determine if we need to read back a texture image before mapping it.
  248.  */
  249. static boolean
  250. need_tex_readback(struct pipe_transfer *transfer)
  251. {
  252.    struct svga_texture *t = svga_texture(transfer->resource);
  253.  
  254.    if (transfer->usage & PIPE_TRANSFER_READ)
  255.       return TRUE;
  256.  
  257.    if ((transfer->usage & PIPE_TRANSFER_WRITE) &&
  258.        ((transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
  259.       unsigned face;
  260.  
  261.       if (transfer->resource->target == PIPE_TEXTURE_CUBE) {
  262.          assert(transfer->box.depth == 1);
  263.          face = transfer->box.z;
  264.       }
  265.       else {
  266.          face = 0;
  267.       }
  268.       if (svga_was_texture_rendered_to(t, face, transfer->level)) {
  269.          return TRUE;
  270.       }
  271.    }
  272.  
  273.    return FALSE;
  274. }
  275.  
  276.  
  277.  
  278. /* XXX: Still implementing this as if it was a screen function, but
  279.  * can now modify it to queue transfers on the context.
  280.  */
  281. static void *
  282. svga_texture_transfer_map(struct pipe_context *pipe,
  283.                           struct pipe_resource *texture,
  284.                           unsigned level,
  285.                           unsigned usage,
  286.                           const struct pipe_box *box,
  287.                           struct pipe_transfer **ptransfer)
  288. {
  289.    struct svga_context *svga = svga_context(pipe);
  290.    struct svga_screen *ss = svga_screen(pipe->screen);
  291.    struct svga_winsys_screen *sws = ss->sws;
  292.    struct svga_transfer *st;
  293.    unsigned nblocksx, nblocksy;
  294.    boolean use_direct_map = svga_have_gb_objects(svga) &&
  295.       !svga_have_gb_dma(svga);
  296.    unsigned d;
  297.  
  298.    /* We can't map texture storage directly unless we have GB objects */
  299.    if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
  300.       if (svga_have_gb_objects(svga))
  301.          use_direct_map = TRUE;
  302.       else
  303.          return NULL;
  304.    }
  305.  
  306.    st = CALLOC_STRUCT(svga_transfer);
  307.    if (!st)
  308.       return NULL;
  309.  
  310.    {
  311.       unsigned w, h;
  312.       if (use_direct_map) {
  313.          /* we'll directly access the guest-backed surface */
  314.          w = u_minify(texture->width0, level);
  315.          h = u_minify(texture->height0, level);
  316.          d = u_minify(texture->depth0, level);
  317.       }
  318.       else {
  319.          /* we'll put the data into a tightly packed buffer */
  320.          w = box->width;
  321.          h = box->height;
  322.          d = box->depth;
  323.       }
  324.       nblocksx = util_format_get_nblocksx(texture->format, w);
  325.       nblocksy = util_format_get_nblocksy(texture->format, h);
  326.    }
  327.  
  328.    pipe_resource_reference(&st->base.resource, texture);
  329.    st->base.level = level;
  330.    st->base.usage = usage;
  331.    st->base.box = *box;
  332.    st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
  333.    st->base.layer_stride = st->base.stride * nblocksy;
  334.  
  335.    if (!use_direct_map) {
  336.       /* Use a DMA buffer */
  337.       st->hw_nblocksy = nblocksy;
  338.  
  339.       st->hwbuf = svga_winsys_buffer_create(svga,
  340.                                             1,
  341.                                             0,
  342.                                             st->hw_nblocksy * st->base.stride * d);
  343.       while(!st->hwbuf && (st->hw_nblocksy /= 2)) {
  344.          st->hwbuf = svga_winsys_buffer_create(svga,
  345.                                                1,
  346.                                                0,
  347.                                                st->hw_nblocksy * st->base.stride * d);
  348.       }
  349.  
  350.       if (!st->hwbuf) {
  351.          FREE(st);
  352.          return NULL;
  353.       }
  354.  
  355.       if(st->hw_nblocksy < nblocksy) {
  356.          /* We couldn't allocate a hardware buffer big enough for the transfer,
  357.           * so allocate regular malloc memory instead */
  358.          if (0) {
  359.             debug_printf("%s: failed to allocate %u KB of DMA, "
  360.                          "splitting into %u x %u KB DMA transfers\n",
  361.                          __FUNCTION__,
  362.                          (nblocksy*st->base.stride + 1023)/1024,
  363.                          (nblocksy + st->hw_nblocksy - 1)/st->hw_nblocksy,
  364.                          (st->hw_nblocksy*st->base.stride + 1023)/1024);
  365.          }
  366.  
  367.          st->swbuf = MALLOC(nblocksy * st->base.stride * d);
  368.          if (!st->swbuf) {
  369.             sws->buffer_destroy(sws, st->hwbuf);
  370.             FREE(st);
  371.             return NULL;
  372.          }
  373.       }
  374.  
  375.       if (usage & PIPE_TRANSFER_READ) {
  376.          SVGA3dSurfaceDMAFlags flags;
  377.          memset(&flags, 0, sizeof flags);
  378.          svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
  379.       }
  380.    } else {
  381.       struct pipe_transfer *transfer = &st->base;
  382.       struct svga_texture *tex = svga_texture(transfer->resource);
  383.       struct svga_winsys_surface *surf = tex->handle;
  384.       unsigned face;
  385.  
  386.       assert(surf);
  387.  
  388.       if (tex->b.b.target == PIPE_TEXTURE_CUBE) {
  389.          face = transfer->box.z;
  390.       } else {
  391.          face = 0;
  392.       }
  393.  
  394.       if (need_tex_readback(transfer)) {
  395.          SVGA3dBox box;
  396.          enum pipe_error ret;
  397.  
  398.          box.x = transfer->box.x;
  399.          box.y = transfer->box.y;
  400.          box.w = transfer->box.width;
  401.          box.h = transfer->box.height;
  402.          box.d = transfer->box.depth;
  403.          if (tex->b.b.target == PIPE_TEXTURE_CUBE) {
  404.             box.z = 0;
  405.          }
  406.          else {
  407.             box.z = transfer->box.z;
  408.          }
  409.  
  410.          (void) box;  /* not used at this time */
  411.  
  412.          svga_surfaces_flush(svga);
  413.  
  414.          ret = SVGA3D_ReadbackGBImage(svga->swc, surf, face, transfer->level);
  415.  
  416.          if (ret != PIPE_OK) {
  417.             svga_context_flush(svga, NULL);
  418.             ret = SVGA3D_ReadbackGBImage(svga->swc, surf, face, transfer->level);
  419.             assert(ret == PIPE_OK);
  420.          }
  421.  
  422.          svga_context_flush(svga, NULL);
  423.  
  424.          /*
  425.           * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
  426.           * we could potentially clear the flag for all faces/layers/mips.
  427.           */
  428.          svga_clear_texture_rendered_to(tex, face, transfer->level);
  429.       }
  430.       else {
  431.          assert(transfer->usage & PIPE_TRANSFER_WRITE);
  432.          if ((transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) == 0) {
  433.             svga_surfaces_flush(svga);
  434.             if (!sws->surface_is_flushed(sws, surf))
  435.                svga_context_flush(svga, NULL);
  436.          }
  437.       }
  438.    }
  439.  
  440.    st->use_direct_map = use_direct_map;
  441.  
  442.    *ptransfer = &st->base;
  443.  
  444.    /*
  445.     * Begin mapping code
  446.     */
  447.    if (st->swbuf) {
  448.       return st->swbuf;
  449.    }
  450.    else if (!st->use_direct_map) {
  451.       return sws->buffer_map(sws, st->hwbuf, usage);
  452.    }
  453.    else {
  454.       struct svga_screen *screen = svga_screen(svga->pipe.screen);
  455.       SVGA3dSurfaceFormat format;
  456.       SVGA3dSize baseLevelSize;
  457.       struct svga_texture *tex = svga_texture(texture);
  458.       struct svga_winsys_surface *surf = tex->handle;
  459.       uint8_t *map;
  460.       boolean retry;
  461.       unsigned face, offset, mip_width, mip_height;
  462.       unsigned xoffset = box->x;
  463.       unsigned yoffset = box->y;
  464.       unsigned zoffset = box->z;
  465.  
  466.       map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
  467.       if (map == NULL && retry) {
  468.          /*
  469.           * At this point, the svga_surfaces_flush() should already have
  470.           * called in svga_texture_get_transfer().
  471.           */
  472.          svga_context_flush(svga, NULL);
  473.          map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
  474.       }
  475.  
  476.       /*
  477.        * Make sure we return NULL if the map fails
  478.        */
  479.       if (map == NULL) {
  480.          FREE(st);
  481.          return map;
  482.       }
  483.  
  484.       /**
  485.        * Compute the offset to the specific texture slice in the buffer.
  486.        */
  487.       if (tex->b.b.target == PIPE_TEXTURE_CUBE) {
  488.          face = zoffset;
  489.          zoffset = 0;
  490.       } else {
  491.          face = 0;
  492.       }
  493.  
  494.       format = svga_translate_format(screen, tex->b.b.format, 0);
  495.       baseLevelSize.width = tex->b.b.width0;
  496.       baseLevelSize.height = tex->b.b.height0;
  497.       baseLevelSize.depth = tex->b.b.depth0;
  498.  
  499.       offset = svga3dsurface_get_image_offset(format, baseLevelSize,
  500.                                               tex->b.b.last_level + 1, /* numMips */
  501.                                               face, level);
  502.       if (level > 0) {
  503.          assert(offset > 0);
  504.       }
  505.  
  506.       mip_width = u_minify(tex->b.b.width0, level);
  507.       mip_height = u_minify(tex->b.b.height0, level);
  508.  
  509.       offset += svga3dsurface_get_pixel_offset(format, mip_width, mip_height,
  510.                                                xoffset, yoffset, zoffset);
  511.  
  512.       return (void *) (map + offset);
  513.    }
  514. }
  515.  
  516.  
  517. /**
  518.  * Unmap a GB texture surface.
  519.  */
  520. static void
  521. svga_texture_surface_unmap(struct svga_context *svga,
  522.                            struct pipe_transfer *transfer)
  523. {
  524.    struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
  525.    struct svga_winsys_context *swc = svga->swc;
  526.    boolean rebind;
  527.  
  528.    assert(surf);
  529.  
  530.    swc->surface_unmap(swc, surf, &rebind);
  531.    if (rebind) {
  532.       enum pipe_error ret;
  533.       ret = SVGA3D_BindGBSurface(swc, surf);
  534.       if (ret != PIPE_OK) {
  535.          /* flush and retry */
  536.          svga_context_flush(svga, NULL);
  537.          ret = SVGA3D_BindGBSurface(swc, surf);
  538.          assert(ret == PIPE_OK);
  539.       }
  540.    }
  541. }
  542.  
  543.  
/* XXX: Still implementing this as if it was a screen function, but
 * can now modify it to queue transfers on the context.
 */
/**
 * Unmap a texture transfer and push any written data to the host.
 *
 * Non-direct-map writes are uploaded via DMA; direct-map (guest-backed)
 * writes are flushed with an UpdateGBImage command covering the dirty box.
 * Finally the texture's view timestamp is bumped and the transfer object
 * (including any staging buffers) is destroyed.
 *
 * NOTE(review): the command emission order below (unmap before DMA/update,
 * dirty-tracking last) is relied upon -- do not reorder.
 */
static void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st = svga_transfer(transfer);
   struct svga_texture *tex = svga_texture(transfer->resource);

   /* If the CPU pointer came straight from a mapping (not st->swbuf),
    * release that mapping first.
    */
   if (!st->swbuf) {
      if (st->use_direct_map) {
         svga_texture_surface_unmap(svga, transfer);
      }
      else {
         sws->buffer_unmap(sws, st->hwbuf);
      }
   }

   if (!st->use_direct_map && (st->base.usage & PIPE_TRANSFER_WRITE)) {
      /* Use DMA to transfer texture data */
      SVGA3dSurfaceDMAFlags flags;

      memset(&flags, 0, sizeof flags);
      if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         flags.discard = TRUE;
      }
      if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         flags.unsynchronized = TRUE;
      }

      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
   } else if (transfer->usage & PIPE_TRANSFER_WRITE) {
      /* Direct-map write: tell the host which region of the guest-backed
       * surface image was modified.
       */
      struct svga_winsys_surface *surf =
         svga_texture(transfer->resource)->handle;
      unsigned face;
      SVGA3dBox box;
      enum pipe_error ret;

      assert(svga_have_gb_objects(svga));

      /* update the effected region */
      if (tex->b.b.target == PIPE_TEXTURE_CUBE) {
         face = transfer->box.z;
      } else {
         face = 0;
      }

      /* Cube faces are addressed via 'face'; box.z stays 0 for them. */
      box.x = transfer->box.x;
      box.y = transfer->box.y;
      if (tex->b.b.target == PIPE_TEXTURE_CUBE) {
         box.z = 0;
      }
      else {
         box.z = transfer->box.z;
      }
      box.w = transfer->box.width;
      box.h = transfer->box.height;
      box.d = transfer->box.depth;

      if (0)
         debug_printf("%s %d, %d, %d  %d x %d x %d\n",
                      __FUNCTION__,
                      box.x, box.y, box.z,
                      box.w, box.h, box.d);

      /* Emit the update; flush and retry once if the buffer is full. */
      ret = SVGA3D_UpdateGBImage(svga->swc, surf, &box, face, transfer->level);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_UpdateGBImage(svga->swc, surf, &box, face, transfer->level);
         assert(ret == PIPE_OK);
      }
   }

   /* Invalidate cached sampler views and mark the level as defined. */
   ss->texture_timestamp++;
   svga_age_texture_view(tex, transfer->level);
   if (transfer->resource->target == PIPE_TEXTURE_CUBE)
      svga_define_texture_level(tex, transfer->box.z, transfer->level);
   else
      svga_define_texture_level(tex, 0, transfer->level);

   pipe_resource_reference(&st->base.resource, NULL);

   /* FREE(NULL) is a no-op, so st->swbuf needs no guard. */
   FREE(st->swbuf);
   if (!st->use_direct_map) {
      sws->buffer_destroy(sws, st->hwbuf);
   }
   FREE(st);
}
  636.  
  637.  
/**
 * Resource function dispatch table plugged into the shared
 * u_resource framework for texture resources.
 */
struct u_resource_vtbl svga_texture_vtbl =
{
   svga_texture_get_handle,           /* get_handle */
   svga_texture_destroy,              /* resource_destroy */
   svga_texture_transfer_map,         /* transfer_map */
   u_default_transfer_flush_region,   /* transfer_flush_region */
   svga_texture_transfer_unmap,       /* transfer_unmap */
   u_default_transfer_inline_write    /* transfer_inline_write */
};
  647.  
  648.  
  649. struct pipe_resource *
  650. svga_texture_create(struct pipe_screen *screen,
  651.                     const struct pipe_resource *template)
  652. {
  653.    struct svga_screen *svgascreen = svga_screen(screen);
  654.    struct svga_texture *tex = CALLOC_STRUCT(svga_texture);
  655.  
  656.    if (!tex)
  657.       goto error1;
  658.  
  659.    tex->b.b = *template;
  660.    tex->b.vtbl = &svga_texture_vtbl;
  661.    pipe_reference_init(&tex->b.b.reference, 1);
  662.    tex->b.b.screen = screen;
  663.  
  664.    assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
  665.    if(template->last_level >= SVGA_MAX_TEXTURE_LEVELS)
  666.       goto error2;
  667.    
  668.    tex->key.flags = 0;
  669.    tex->key.size.width = template->width0;
  670.    tex->key.size.height = template->height0;
  671.    tex->key.size.depth = template->depth0;
  672.  
  673.    if(template->target == PIPE_TEXTURE_CUBE) {
  674.       tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
  675.       tex->key.numFaces = 6;
  676.    }
  677.    else {
  678.       tex->key.numFaces = 1;
  679.    }
  680.  
  681.    if (template->target == PIPE_TEXTURE_3D) {
  682.       tex->key.flags |= SVGA3D_SURFACE_VOLUME;
  683.    }
  684.  
  685.    tex->key.cachable = 1;
  686.  
  687.    if (template->bind & PIPE_BIND_SAMPLER_VIEW)
  688.       tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
  689.  
  690.    if (template->bind & PIPE_BIND_DISPLAY_TARGET) {
  691.       tex->key.cachable = 0;
  692.    }
  693.  
  694.    if (template->bind & PIPE_BIND_SHARED) {
  695.       tex->key.cachable = 0;
  696.    }
  697.  
  698.    if (template->bind & (PIPE_BIND_SCANOUT |
  699.                          PIPE_BIND_CURSOR)) {
  700.       tex->key.flags |= SVGA3D_SURFACE_HINT_SCANOUT;
  701.       tex->key.cachable = 0;
  702.    }
  703.  
  704.    /*
  705.     * Note: Previously we never passed the
  706.     * SVGA3D_SURFACE_HINT_RENDERTARGET hint. Mesa cannot
  707.     * know beforehand whether a texture will be used as a rendertarget or not
  708.     * and it always requests PIPE_BIND_RENDER_TARGET, therefore
  709.     * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
  710.     *
  711.     * However, this was changed since other state trackers
  712.     * (XA for example) uses it accurately and certain device versions
  713.     * relies on it in certain situations to render correctly.
  714.     */
  715.    if((template->bind & PIPE_BIND_RENDER_TARGET) &&
  716.       !util_format_is_s3tc(template->format))
  717.       tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
  718.    
  719.    if(template->bind & PIPE_BIND_DEPTH_STENCIL)
  720.       tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
  721.    
  722.    tex->key.numMipLevels = template->last_level + 1;
  723.    
  724.    tex->key.format = svga_translate_format(svgascreen, template->format, template->bind);
  725.    if(tex->key.format == SVGA3D_FORMAT_INVALID)
  726.       goto error2;
  727.  
  728.    SVGA_DBG(DEBUG_DMA, "surface_create for texture\n", tex->handle);
  729.    tex->handle = svga_screen_surface_create(svgascreen, &tex->key);
  730.    if (!tex->handle)
  731.        goto error2;
  732.  
  733.    SVGA_DBG(DEBUG_DMA, "  --> got sid %p (texture)\n", tex->handle);
  734.  
  735.    debug_reference(&tex->b.b.reference,
  736.                    (debug_reference_descriptor)debug_describe_resource, 0);
  737.  
  738.    tex->size = util_resource_size(template);
  739.    svgascreen->total_resource_bytes += tex->size;
  740.  
  741.    tex->rendered_to = CALLOC(template->depth0 * template->array_size,
  742.                              sizeof(tex->rendered_to[0]));
  743.    if (!tex->rendered_to)
  744.       goto error2;
  745.  
  746.    return &tex->b.b;
  747.  
  748. error2:
  749.    FREE(tex->rendered_to);
  750.    FREE(tex);
  751. error1:
  752.    return NULL;
  753. }
  754.  
  755.  
  756. struct pipe_resource *
  757. svga_texture_from_handle(struct pipe_screen *screen,
  758.                          const struct pipe_resource *template,
  759.                          struct winsys_handle *whandle)
  760. {
  761.    struct svga_winsys_screen *sws = svga_winsys_screen(screen);
  762.    struct svga_winsys_surface *srf;
  763.    struct svga_texture *tex;
  764.    enum SVGA3dSurfaceFormat format = 0;
  765.    assert(screen);
  766.  
  767.    /* Only supports one type */
  768.    if ((template->target != PIPE_TEXTURE_2D &&
  769.        template->target != PIPE_TEXTURE_RECT) ||
  770.        template->last_level != 0 ||
  771.        template->depth0 != 1) {
  772.       return NULL;
  773.    }
  774.  
  775.    srf = sws->surface_from_handle(sws, whandle, &format);
  776.  
  777.    if (!srf)
  778.       return NULL;
  779.  
  780.    if (svga_translate_format(svga_screen(screen), template->format, template->bind) != format) {
  781.       unsigned f1 = svga_translate_format(svga_screen(screen), template->format, template->bind);
  782.       unsigned f2 = format;
  783.  
  784.       /* It's okay for XRGB and ARGB or depth with/out stencil to get mixed up */
  785.       if ( !( (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_A8R8G8B8) ||
  786.               (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_X8R8G8B8) ||
  787.               (f1 == SVGA3D_Z_D24X8 && f2 == SVGA3D_Z_D24S8) ||
  788.               (f1 == SVGA3D_Z_DF24 && f2 == SVGA3D_Z_D24S8_INT) ) ) {
  789.          debug_printf("%s wrong format %u != %u\n", __FUNCTION__, f1, f2);
  790.          return NULL;
  791.       }
  792.    }
  793.  
  794.    tex = CALLOC_STRUCT(svga_texture);
  795.    if (!tex)
  796.       return NULL;
  797.  
  798.    tex->b.b = *template;
  799.    tex->b.vtbl = &svga_texture_vtbl;
  800.    pipe_reference_init(&tex->b.b.reference, 1);
  801.    tex->b.b.screen = screen;
  802.  
  803.    SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);
  804.  
  805.    tex->key.cachable = 0;
  806.    tex->handle = srf;
  807.  
  808.    tex->rendered_to = CALLOC(1, sizeof(tex->rendered_to[0]));
  809.  
  810.    return &tex->b.b;
  811. }
  812.