Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Last modification | View Log | RSS feed

  1. /**********************************************************
  2.  * Copyright 2008-2009 VMware, Inc.  All rights reserved.
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person
  5.  * obtaining a copy of this software and associated documentation
  6.  * files (the "Software"), to deal in the Software without
  7.  * restriction, including without limitation the rights to use, copy,
  8.  * modify, merge, publish, distribute, sublicense, and/or sell copies
  9.  * of the Software, and to permit persons to whom the Software is
  10.  * furnished to do so, subject to the following conditions:
  11.  *
  12.  * The above copyright notice and this permission notice shall be
  13.  * included in all copies or substantial portions of the Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  16.  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  17.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  18.  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  19.  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  20.  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  21.  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  22.  * SOFTWARE.
  23.  *
  24.  **********************************************************/
  25.  
  26. #include "svga_cmd.h"
  27.  
  28. #include "pipe/p_state.h"
  29. #include "pipe/p_defines.h"
  30. #include "util/u_inlines.h"
  31. #include "os/os_thread.h"
  32. #include "util/u_math.h"
  33. #include "util/u_memory.h"
  34. #include "util/u_resource.h"
  35.  
  36. #include "svga_context.h"
  37. #include "svga_screen.h"
  38. #include "svga_resource_buffer.h"
  39. #include "svga_resource_buffer_upload.h"
  40. #include "svga_winsys.h"
  41. #include "svga_debug.h"
  42.  
  43.  
  44. /**
  45.  * Vertex and index buffers need hardware backing.  Constant buffers
  46.  * do not.  No other types of buffers currently supported.
  47.  */
  48. static INLINE boolean
  49. svga_buffer_needs_hw_storage(unsigned usage)
  50. {
  51.    return usage & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER);
  52. }
  53.  
  54.  
  55. /**
  56.  * Create a buffer transfer.
  57.  *
  58.  * Unlike texture DMAs (which are written immediately to the command buffer and
  59.  * therefore inherently serialized with other context operations), for buffers
  60.  * we try to coalesce multiple range mappings (i.e, multiple calls to this
  61.  * function) into a single DMA command, for better efficiency in command
  62.  * processing.  This means we need to exercise extra care here to ensure that
  63.  * the end result is exactly the same as if one DMA was used for every mapped
  64.  * range.
  65.  */
  66. static void *
  67. svga_buffer_transfer_map(struct pipe_context *pipe,
  68.                          struct pipe_resource *resource,
  69.                          unsigned level,
  70.                          unsigned usage,
  71.                          const struct pipe_box *box,
  72.                          struct pipe_transfer **ptransfer)
  73. {
  74.    struct svga_context *svga = svga_context(pipe);
  75.    struct svga_screen *ss = svga_screen(pipe->screen);
  76.    struct svga_buffer *sbuf = svga_buffer(resource);
  77.    struct pipe_transfer *transfer;
  78.    uint8_t *map;
  79.  
  80.    transfer = CALLOC_STRUCT(pipe_transfer);
  81.    if (transfer == NULL) {
  82.       return NULL;
  83.    }
  84.  
  85.    transfer->resource = resource;
  86.    transfer->level = level;
  87.    transfer->usage = usage;
  88.    transfer->box = *box;
  89.  
  90.    if (usage & PIPE_TRANSFER_WRITE) {
  91.       if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
  92.          /*
  93.           * Flush any pending primitives, finish writing any pending DMA
  94.           * commands, and tell the host to discard the buffer contents on
  95.           * the next DMA operation.
  96.           */
  97.  
  98.          svga_hwtnl_flush_buffer(svga, resource);
  99.  
  100.          if (sbuf->dma.pending) {
  101.             svga_buffer_upload_flush(svga, sbuf);
  102.  
  103.             /*
  104.              * Instead of flushing the context command buffer, simply discard
  105.              * the current hwbuf, and start a new one.
  106.              */
  107.  
  108.             svga_buffer_destroy_hw_storage(ss, sbuf);
  109.          }
  110.  
  111.          sbuf->map.num_ranges = 0;
  112.          sbuf->dma.flags.discard = TRUE;
  113.       }
  114.  
  115.       if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
  116.          if (!sbuf->map.num_ranges) {
  117.             /*
  118.              * No pending ranges to upload so far, so we can tell the host to
  119.              * not synchronize on the next DMA command.
  120.              */
  121.  
  122.             sbuf->dma.flags.unsynchronized = TRUE;
  123.          }
  124.       } else {
  125.          /*
  126.           * Synchronizing, so flush any pending primitives, finish writing any
  127.           * pending DMA command, and ensure the next DMA will be done in order.
  128.           */
  129.  
  130.          svga_hwtnl_flush_buffer(svga, resource);
  131.  
  132.          if (sbuf->dma.pending) {
  133.             svga_buffer_upload_flush(svga, sbuf);
  134.  
  135.             if (sbuf->hwbuf) {
  136.                /*
  137.                 * We have a pending DMA upload from a hardware buffer, therefore
  138.                 * we need to ensure that the host finishes processing that DMA
  139.                 * command before the state tracker can start overwriting the
  140.                 * hardware buffer.
  141.                 *
  142.                 * XXX: This could be avoided by tying the hardware buffer to
  143.                 * the transfer (just as done with textures), which would allow
  144.                 * overlapping DMAs commands to be queued on the same context
  145.                 * buffer. However, due to the likelihood of software vertex
  146.                 * processing, it is more convenient to hold on to the hardware
  147.                 * buffer, allowing to quickly access the contents from the CPU
  148.                 * without having to do a DMA download from the host.
  149.                 */
  150.  
  151.                if (usage & PIPE_TRANSFER_DONTBLOCK) {
  152.                   /*
  153.                    * Flushing the command buffer here will most likely cause
  154.                    * the map of the hwbuf below to block, so preemptively
  155.                    * return NULL here if DONTBLOCK is set to prevent unnecessary
  156.                    * command buffer flushes.
  157.                    */
  158.  
  159.                   FREE(transfer);
  160.                   return NULL;
  161.                }
  162.  
  163.                svga_context_flush(svga, NULL);
  164.             }
  165.          }
  166.  
  167.          sbuf->dma.flags.unsynchronized = FALSE;
  168.       }
  169.    }
  170.  
  171.    if (!sbuf->swbuf && !sbuf->hwbuf) {
  172.       if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
  173.          /*
  174.           * We can't create a hardware buffer big enough, so create a malloc
  175.           * buffer instead.
  176.           */
  177.          if (0) {
  178.             debug_printf("%s: failed to allocate %u KB of DMA, "
  179.                          "splitting DMA transfers\n",
  180.                          __FUNCTION__,
  181.                          (sbuf->b.b.width0 + 1023)/1024);
  182.          }
  183.  
  184.          sbuf->swbuf = align_malloc(sbuf->b.b.width0, 16);
  185.          if (!sbuf->swbuf) {
  186.             FREE(transfer);
  187.             return NULL;
  188.          }
  189.       }
  190.    }
  191.  
  192.    if (sbuf->swbuf) {
  193.       /* User/malloc buffer */
  194.       map = sbuf->swbuf;
  195.    }
  196.    else if (sbuf->hwbuf) {
  197.       struct svga_screen *ss = svga_screen(pipe->screen);
  198.       struct svga_winsys_screen *sws = ss->sws;
  199.  
  200.       map = sws->buffer_map(sws, sbuf->hwbuf, transfer->usage);
  201.    }
  202.    else {
  203.       map = NULL;
  204.    }
  205.  
  206.    if (map) {
  207.       ++sbuf->map.count;
  208.       map += transfer->box.x;
  209.       *ptransfer = transfer;
  210.    } else {
  211.       FREE(transfer);
  212.    }
  213.    
  214.    return map;
  215. }
  216.  
  217.  
  218. static void
  219. svga_buffer_transfer_flush_region( struct pipe_context *pipe,
  220.                                    struct pipe_transfer *transfer,
  221.                                    const struct pipe_box *box)
  222. {
  223.    struct svga_screen *ss = svga_screen(pipe->screen);
  224.    struct svga_buffer *sbuf = svga_buffer(transfer->resource);
  225.  
  226.    unsigned offset = transfer->box.x + box->x;
  227.    unsigned length = box->width;
  228.  
  229.    assert(transfer->usage & PIPE_TRANSFER_WRITE);
  230.    assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);
  231.  
  232.    pipe_mutex_lock(ss->swc_mutex);
  233.    svga_buffer_add_range(sbuf, offset, offset + length);
  234.    pipe_mutex_unlock(ss->swc_mutex);
  235. }
  236.  
  237.  
  238. static void
  239. svga_buffer_transfer_unmap( struct pipe_context *pipe,
  240.                             struct pipe_transfer *transfer )
  241. {
  242.    struct svga_screen *ss = svga_screen(pipe->screen);
  243.    struct svga_winsys_screen *sws = ss->sws;
  244.    struct svga_buffer *sbuf = svga_buffer(transfer->resource);
  245.    
  246.    pipe_mutex_lock(ss->swc_mutex);
  247.    
  248.    assert(sbuf->map.count);
  249.    if (sbuf->map.count) {
  250.       --sbuf->map.count;
  251.    }
  252.  
  253.    if (sbuf->hwbuf) {
  254.       sws->buffer_unmap(sws, sbuf->hwbuf);
  255.    }
  256.  
  257.    if (transfer->usage & PIPE_TRANSFER_WRITE) {
  258.       if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
  259.          /*
  260.           * Mapped range not flushed explicitly, so flush the whole buffer,
  261.           * and tell the host to discard the contents when processing the DMA
  262.           * command.
  263.           */
  264.  
  265.          SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");
  266.    
  267.          sbuf->dma.flags.discard = TRUE;
  268.  
  269.          svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
  270.       }
  271.    }
  272.  
  273.    pipe_mutex_unlock(ss->swc_mutex);
  274.    FREE(transfer);
  275. }
  276.  
  277.  
  278. static void
  279. svga_buffer_destroy( struct pipe_screen *screen,
  280.                      struct pipe_resource *buf )
  281. {
  282.    struct svga_screen *ss = svga_screen(screen);
  283.    struct svga_buffer *sbuf = svga_buffer( buf );
  284.  
  285.    assert(!p_atomic_read(&buf->reference.count));
  286.    
  287.    assert(!sbuf->dma.pending);
  288.  
  289.    if(sbuf->handle)
  290.       svga_buffer_destroy_host_surface(ss, sbuf);
  291.    
  292.    if(sbuf->uploaded.buffer)
  293.       pipe_resource_reference(&sbuf->uploaded.buffer, NULL);
  294.  
  295.    if(sbuf->hwbuf)
  296.       svga_buffer_destroy_hw_storage(ss, sbuf);
  297.    
  298.    if(sbuf->swbuf && !sbuf->user)
  299.       align_free(sbuf->swbuf);
  300.    
  301.    ss->total_resource_bytes -= sbuf->size;
  302.  
  303.    FREE(sbuf);
  304. }
  305.  
  306.  
/**
 * Dispatch table wiring the generic u_resource entry points to the
 * buffer-specific implementations above.
 */
struct u_resource_vtbl svga_buffer_vtbl =
{
   u_default_resource_get_handle,      /* get_handle */
   svga_buffer_destroy,                /* resource_destroy */
   svga_buffer_transfer_map,           /* transfer_map */
   svga_buffer_transfer_flush_region,  /* transfer_flush_region */
   svga_buffer_transfer_unmap,         /* transfer_unmap */
   u_default_transfer_inline_write     /* transfer_inline_write */
};
  316.  
  317.  
  318.  
  319. struct pipe_resource *
  320. svga_buffer_create(struct pipe_screen *screen,
  321.                    const struct pipe_resource *template)
  322. {
  323.    struct svga_screen *ss = svga_screen(screen);
  324.    struct svga_buffer *sbuf;
  325.    
  326.    sbuf = CALLOC_STRUCT(svga_buffer);
  327.    if(!sbuf)
  328.       goto error1;
  329.    
  330.    sbuf->b.b = *template;
  331.    sbuf->b.vtbl = &svga_buffer_vtbl;
  332.    pipe_reference_init(&sbuf->b.b.reference, 1);
  333.    sbuf->b.b.screen = screen;
  334.  
  335.    if(svga_buffer_needs_hw_storage(template->bind)) {
  336.       if(svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
  337.          goto error2;
  338.    }
  339.    else {
  340.       sbuf->swbuf = align_malloc(template->width0, 64);
  341.       if(!sbuf->swbuf)
  342.          goto error2;
  343.    }
  344.      
  345.    debug_reference(&sbuf->b.b.reference,
  346.                    (debug_reference_descriptor)debug_describe_resource, 0);
  347.  
  348.    sbuf->size = util_resource_size(template);
  349.    ss->total_resource_bytes += sbuf->size;
  350.  
  351.    return &sbuf->b.b;
  352.  
  353. error2:
  354.    FREE(sbuf);
  355. error1:
  356.    return NULL;
  357. }
  358.  
  359. struct pipe_resource *
  360. svga_user_buffer_create(struct pipe_screen *screen,
  361.                         void *ptr,
  362.                         unsigned bytes,
  363.                         unsigned bind)
  364. {
  365.    struct svga_buffer *sbuf;
  366.    
  367.    sbuf = CALLOC_STRUCT(svga_buffer);
  368.    if(!sbuf)
  369.       goto no_sbuf;
  370.      
  371.    pipe_reference_init(&sbuf->b.b.reference, 1);
  372.    sbuf->b.vtbl = &svga_buffer_vtbl;
  373.    sbuf->b.b.screen = screen;
  374.    sbuf->b.b.format = PIPE_FORMAT_R8_UNORM; /* ?? */
  375.    sbuf->b.b.usage = PIPE_USAGE_IMMUTABLE;
  376.    sbuf->b.b.bind = bind;
  377.    sbuf->b.b.width0 = bytes;
  378.    sbuf->b.b.height0 = 1;
  379.    sbuf->b.b.depth0 = 1;
  380.    sbuf->b.b.array_size = 1;
  381.  
  382.    sbuf->swbuf = ptr;
  383.    sbuf->user = TRUE;
  384.  
  385.    debug_reference(&sbuf->b.b.reference,
  386.                    (debug_reference_descriptor)debug_describe_resource, 0);
  387.    
  388.    return &sbuf->b.b;
  389.  
  390. no_sbuf:
  391.    return NULL;
  392. }
  393.  
  394.  
  395.  
  396.