Subversion Repositories Kolibri OS

Rev

Blame | Last modification | View Log | RSS feed

  1. /**********************************************************
  2.  * Copyright 2008-2009 VMware, Inc.  All rights reserved.
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person
  5.  * obtaining a copy of this software and associated documentation
  6.  * files (the "Software"), to deal in the Software without
  7.  * restriction, including without limitation the rights to use, copy,
  8.  * modify, merge, publish, distribute, sublicense, and/or sell copies
  9.  * of the Software, and to permit persons to whom the Software is
  10.  * furnished to do so, subject to the following conditions:
  11.  *
  12.  * The above copyright notice and this permission notice shall be
  13.  * included in all copies or substantial portions of the Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  16.  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  17.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  18.  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  19.  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  20.  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  21.  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  22.  * SOFTWARE.
  23.  *
  24.  **********************************************************/
  25.  
  26. #include "svga_cmd.h"
  27.  
  28. #include "pipe/p_state.h"
  29. #include "pipe/p_defines.h"
  30. #include "util/u_inlines.h"
  31. #include "os/os_thread.h"
  32. #include "util/u_math.h"
  33. #include "util/u_memory.h"
  34. #include "util/u_resource.h"
  35.  
  36. #include "svga_context.h"
  37. #include "svga_screen.h"
  38. #include "svga_resource_buffer.h"
  39. #include "svga_resource_buffer_upload.h"
  40. #include "svga_winsys.h"
  41. #include "svga_debug.h"
  42.  
  43.  
  44. /**
  45.  * Vertex and index buffers need hardware backing.  Constant buffers
  46.  * do not.  No other types of buffers currently supported.
  47.  */
  48. static INLINE boolean
  49. svga_buffer_needs_hw_storage(unsigned usage)
  50. {
  51.    return usage & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER);
  52. }
  53.  
  54.  
  55. /**
  56.  * Create a buffer transfer.
  57.  *
  58.  * Unlike texture DMAs (which are written immediately to the command buffer and
  59.  * therefore inherently serialized with other context operations), for buffers
  60.  * we try to coalesce multiple range mappings (i.e, multiple calls to this
  61.  * function) into a single DMA command, for better efficiency in command
  62.  * processing.  This means we need to exercise extra care here to ensure that
  63.  * the end result is exactly the same as if one DMA was used for every mapped
  64.  * range.
  65.  */
/**
 * Map a range of a buffer resource for CPU access.
 *
 * \param pipe      context performing the map
 * \param resource  the buffer being mapped
 * \param level     mip level (unused for buffers; stored in the transfer)
 * \param usage     PIPE_TRANSFER_x flags controlling synchronization
 * \param box       byte range to map (box->x is the start offset)
 * \param ptransfer receives the new transfer object on success
 * \return pointer to the mapped range, or NULL on failure
 *         (including when PIPE_TRANSFER_DONTBLOCK would have to block)
 */
static void *
svga_buffer_transfer_map(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,
                         const struct pipe_box *box,
                         struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(resource);
   struct pipe_transfer *transfer;
   uint8_t *map;

   transfer = CALLOC_STRUCT(pipe_transfer);
   if (transfer == NULL) {
      return NULL;
   }

   transfer->resource = resource;
   transfer->level = level;
   transfer->usage = usage;
   transfer->box = *box;

   if (usage & PIPE_TRANSFER_WRITE) {
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         /*
          * Flush any pending primitives, finish writing any pending DMA
          * commands, and tell the host to discard the buffer contents on
          * the next DMA operation.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            /*
             * Instead of flushing the context command buffer, simply discard
             * the current hwbuf, and start a new one.
             * With GB objects, the map operation takes care of this
             * if passed the PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE flag,
             * and the old backing store is busy.
             */

            if (!svga_have_gb_objects(svga))
               svga_buffer_destroy_hw_storage(ss, sbuf);
         }

         /* Previously queued ranges are obsolete once we discard. */
         sbuf->map.num_ranges = 0;
         sbuf->dma.flags.discard = TRUE;
      }

      if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         if (!sbuf->map.num_ranges) {
            /*
             * No pending ranges to upload so far, so we can tell the host to
             * not synchronize on the next DMA command.
             */

            sbuf->dma.flags.unsynchronized = TRUE;
         }
      } else {
         /*
          * Synchronizing, so flush any pending primitives, finish writing any
          * pending DMA command, and ensure the next DMA will be done in order.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            if (svga_buffer_has_hw_storage(sbuf)) {
               /*
                * We have a pending DMA upload from a hardware buffer, therefore
                * we need to ensure that the host finishes processing that DMA
                * command before the state tracker can start overwriting the
                * hardware buffer.
                *
                * XXX: This could be avoided by tying the hardware buffer to
                * the transfer (just as done with textures), which would allow
                * overlapping DMAs commands to be queued on the same context
                * buffer. However, due to the likelihood of software vertex
                * processing, it is more convenient to hold on to the hardware
                * buffer, allowing to quickly access the contents from the CPU
                * without having to do a DMA download from the host.
                */

               if (usage & PIPE_TRANSFER_DONTBLOCK) {
                  /*
                   * Flushing the command buffer here will most likely cause
                   * the map of the hwbuf below to block, so preemptively
                   * return NULL here if DONTBLOCK is set to prevent unnecessary
                   * command buffer flushes.
                   */

                  FREE(transfer);
                  return NULL;
               }

               svga_context_flush(svga, NULL);
            }
         }

         sbuf->dma.flags.unsynchronized = FALSE;
      }
   }

   if (!sbuf->swbuf && !svga_buffer_has_hw_storage(sbuf)) {
      if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
         /*
          * We can't create a hardware buffer big enough, so create a malloc
          * buffer instead.
          */
         /* Debug output disabled; enable the condition to trace this path. */
         if (0) {
            debug_printf("%s: failed to allocate %u KB of DMA, "
                         "splitting DMA transfers\n",
                         __FUNCTION__,
                         (sbuf->b.b.width0 + 1023)/1024);
         }

         sbuf->swbuf = align_malloc(sbuf->b.b.width0, 16);
         if (!sbuf->swbuf) {
            FREE(transfer);
            return NULL;
         }
      }
   }

   if (sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else if (svga_buffer_has_hw_storage(sbuf)) {
      boolean retry;

      map = svga_buffer_hw_storage_map(svga, sbuf, transfer->usage, &retry);
      if (map == NULL && retry) {
         /*
          * At this point, svga_buffer_get_transfer() has already
          * hit the DISCARD_WHOLE_RESOURCE path and flushed HWTNL
          * for this buffer.
          */
         svga_context_flush(svga, NULL);
         map = svga_buffer_hw_storage_map(svga, sbuf, transfer->usage, &retry);
      }
   }
   else {
      map = NULL;
   }

   if (map) {
      ++sbuf->map.count;
      /* box.x is the byte offset of the mapped range within the buffer. */
      map += transfer->box.x;
      *ptransfer = transfer;
   } else {
      /* Map failed: the transfer is never exposed, so free it here. */
      FREE(transfer);
   }
   
   return map;
}
  228.  
  229.  
  230. static void
  231. svga_buffer_transfer_flush_region( struct pipe_context *pipe,
  232.                                    struct pipe_transfer *transfer,
  233.                                    const struct pipe_box *box)
  234. {
  235.    struct svga_screen *ss = svga_screen(pipe->screen);
  236.    struct svga_buffer *sbuf = svga_buffer(transfer->resource);
  237.  
  238.    unsigned offset = transfer->box.x + box->x;
  239.    unsigned length = box->width;
  240.  
  241.    assert(transfer->usage & PIPE_TRANSFER_WRITE);
  242.    assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);
  243.  
  244.    pipe_mutex_lock(ss->swc_mutex);
  245.    svga_buffer_add_range(sbuf, offset, offset + length);
  246.    pipe_mutex_unlock(ss->swc_mutex);
  247. }
  248.  
  249.  
  250. static void
  251. svga_buffer_transfer_unmap( struct pipe_context *pipe,
  252.                             struct pipe_transfer *transfer )
  253. {
  254.    struct svga_screen *ss = svga_screen(pipe->screen);
  255.    struct svga_context *svga = svga_context(pipe);
  256.    struct svga_buffer *sbuf = svga_buffer(transfer->resource);
  257.    
  258.    pipe_mutex_lock(ss->swc_mutex);
  259.    
  260.    assert(sbuf->map.count);
  261.    if (sbuf->map.count) {
  262.       --sbuf->map.count;
  263.    }
  264.  
  265.    if (svga_buffer_has_hw_storage(sbuf)) {
  266.       svga_buffer_hw_storage_unmap(svga, sbuf);
  267.    }
  268.  
  269.    if (transfer->usage & PIPE_TRANSFER_WRITE) {
  270.       if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
  271.          /*
  272.           * Mapped range not flushed explicitly, so flush the whole buffer,
  273.           * and tell the host to discard the contents when processing the DMA
  274.           * command.
  275.           */
  276.  
  277.          SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");
  278.    
  279.          sbuf->dma.flags.discard = TRUE;
  280.  
  281.          svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
  282.       }
  283.    }
  284.  
  285.    pipe_mutex_unlock(ss->swc_mutex);
  286.    FREE(transfer);
  287. }
  288.  
  289.  
  290. static void
  291. svga_buffer_destroy( struct pipe_screen *screen,
  292.                      struct pipe_resource *buf )
  293. {
  294.    struct svga_screen *ss = svga_screen(screen);
  295.    struct svga_buffer *sbuf = svga_buffer( buf );
  296.  
  297.    assert(!p_atomic_read(&buf->reference.count));
  298.    
  299.    assert(!sbuf->dma.pending);
  300.  
  301.    if(sbuf->handle)
  302.       svga_buffer_destroy_host_surface(ss, sbuf);
  303.    
  304.    if(sbuf->uploaded.buffer)
  305.       pipe_resource_reference(&sbuf->uploaded.buffer, NULL);
  306.  
  307.    if(sbuf->hwbuf)
  308.       svga_buffer_destroy_hw_storage(ss, sbuf);
  309.    
  310.    if(sbuf->swbuf && !sbuf->user)
  311.       align_free(sbuf->swbuf);
  312.    
  313.    ss->total_resource_bytes -= sbuf->size;
  314.  
  315.    FREE(sbuf);
  316. }
  317.  
  318.  
/**
 * Dispatch table plugging the buffer implementations above into the
 * generic u_resource entry points.  Entry order must match the
 * u_resource_vtbl declaration.
 */
struct u_resource_vtbl svga_buffer_vtbl =
{
   u_default_resource_get_handle,      /* get_handle */
   svga_buffer_destroy,              /* resource_destroy */
   svga_buffer_transfer_map,         /* transfer_map */
   svga_buffer_transfer_flush_region,  /* transfer_flush_region */
   svga_buffer_transfer_unmap,       /* transfer_unmap */
   u_default_transfer_inline_write   /* transfer_inline_write */
};
  328.  
  329.  
  330.  
  331. struct pipe_resource *
  332. svga_buffer_create(struct pipe_screen *screen,
  333.                    const struct pipe_resource *template)
  334. {
  335.    struct svga_screen *ss = svga_screen(screen);
  336.    struct svga_buffer *sbuf;
  337.    
  338.    sbuf = CALLOC_STRUCT(svga_buffer);
  339.    if(!sbuf)
  340.       goto error1;
  341.    
  342.    sbuf->b.b = *template;
  343.    sbuf->b.vtbl = &svga_buffer_vtbl;
  344.    pipe_reference_init(&sbuf->b.b.reference, 1);
  345.    sbuf->b.b.screen = screen;
  346.  
  347.    if(svga_buffer_needs_hw_storage(template->bind)) {
  348.       if(svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
  349.          goto error2;
  350.    }
  351.    else {
  352.       sbuf->swbuf = align_malloc(template->width0, 64);
  353.       if(!sbuf->swbuf)
  354.          goto error2;
  355.    }
  356.      
  357.    debug_reference(&sbuf->b.b.reference,
  358.                    (debug_reference_descriptor)debug_describe_resource, 0);
  359.  
  360.    sbuf->size = util_resource_size(template);
  361.    ss->total_resource_bytes += sbuf->size;
  362.  
  363.    return &sbuf->b.b;
  364.  
  365. error2:
  366.    FREE(sbuf);
  367. error1:
  368.    return NULL;
  369. }
  370.  
  371. struct pipe_resource *
  372. svga_user_buffer_create(struct pipe_screen *screen,
  373.                         void *ptr,
  374.                         unsigned bytes,
  375.                         unsigned bind)
  376. {
  377.    struct svga_buffer *sbuf;
  378.    
  379.    sbuf = CALLOC_STRUCT(svga_buffer);
  380.    if(!sbuf)
  381.       goto no_sbuf;
  382.      
  383.    pipe_reference_init(&sbuf->b.b.reference, 1);
  384.    sbuf->b.vtbl = &svga_buffer_vtbl;
  385.    sbuf->b.b.screen = screen;
  386.    sbuf->b.b.format = PIPE_FORMAT_R8_UNORM; /* ?? */
  387.    sbuf->b.b.usage = PIPE_USAGE_IMMUTABLE;
  388.    sbuf->b.b.bind = bind;
  389.    sbuf->b.b.width0 = bytes;
  390.    sbuf->b.b.height0 = 1;
  391.    sbuf->b.b.depth0 = 1;
  392.    sbuf->b.b.array_size = 1;
  393.  
  394.    sbuf->swbuf = ptr;
  395.    sbuf->user = TRUE;
  396.  
  397.    debug_reference(&sbuf->b.b.reference,
  398.                    (debug_reference_descriptor)debug_describe_resource, 0);
  399.    
  400.    return &sbuf->b.b;
  401.  
  402. no_sbuf:
  403.    return NULL;
  404. }
  405.  
  406.  
  407.  
  408.