/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/


#include "os/os_thread.h"
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_cmd.h"
#include "svga_context.h"
#include "svga_debug.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_screen.h"
#include "svga_winsys.h"

/**
 * Describes a complete SVGA_3D_CMD_UPDATE_GB_IMAGE command.
 */
struct svga_3d_update_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdUpdateGBImage body;
};

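/**
 * Describes a complete SVGA_3D_CMD_INVALIDATE_GB_IMAGE command.
 */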
struct svga_3d_invalidate_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdInvalidateGBImage body;
};


/**
 * Allocate a winsys_buffer (i.e. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it should not be called from any function involved in flushing
 * to avoid recursion.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_context *svga,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_context_flush(svga, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}

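/* A minimal usage sketch (hypothetical caller, not code from this file),
 * assuming an 'svga' context and a byte count 'size':
 *
 *    struct svga_winsys_buffer *hwbuf =
 *       svga_winsys_buffer_create(svga, 16, 0, size);
 *    if (!hwbuf)
 *       return PIPE_ERROR_OUT_OF_MEMORY;   (flush-and-retry failed too)
 */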

/**
 * Destroy the HW storage if it's separate from the host surface.
 * In the guest-backed case, the HW storage is associated with the host
 * surface, so this function is a no-op there.
 */
void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);
   assert(sbuf->hwbuf);
   if (sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}



/**
 * Allocate DMA-able or updatable storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   assert(!sbuf->user);

   if (ss->sws->have_gb_objects) {
      assert(sbuf->handle || !sbuf->dma.pending);
      return svga_buffer_create_host_surface(ss, sbuf);
   }
   if (!sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;
      unsigned alignment = 16;
      unsigned usage = 0;
      unsigned size = sbuf->b.b.width0;

      sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
      if (!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}


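/**
 * Create the host surface (sid) backing this buffer, if it doesn't exist
 * yet.  Surface hint flags are derived from the pipe bind flags, and the
 * surface may be recycled from the screen's surface cache.
 */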
enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   assert(!sbuf->user);

   if (!sbuf->handle) {
      sbuf->key.flags = 0;

      sbuf->key.format = SVGA3D_BUFFER;
      if (sbuf->b.b.bind & PIPE_BIND_VERTEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
      if (sbuf->b.b.bind & PIPE_BIND_INDEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

      sbuf->key.size.width = sbuf->b.b.width0;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->b.b.width0);

      sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
      if (!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag the first time the buffer is written
       * to, as svga_screen_surface_create might have returned a recycled
       * host buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n", sbuf->handle, sbuf->b.b.width0);
   }

   return PIPE_OK;
}


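/**
 * Release the buffer's host surface handle, if any.  Cachable surfaces
 * may be recycled by the screen rather than destroyed outright.
 */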
void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if (sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n", sbuf->handle, sbuf->b.b.width0);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}


/**
 * Insert a number of preliminary UPDATE_GB_IMAGE commands in the
 * command buffer, equal to the current number of mapped ranges.
 * The UPDATE_GB_IMAGE commands will be patched with the
 * actual ranges just before flush.
 */
static enum pipe_error
svga_buffer_upload_gb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   SVGA3dCmdUpdateGBImage *cmd;
   struct svga_3d_update_gb_image *ccmd = NULL;
   uint32 numBoxes = sbuf->map.num_ranges;
   struct pipe_resource *dummy;
   unsigned int i;

   assert(numBoxes);
   assert(sbuf->dma.updates == NULL);

   if (sbuf->dma.flags.discard) {
      struct svga_3d_invalidate_gb_image *cicmd = NULL;
      SVGA3dCmdInvalidateGBImage *icmd;

      /* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
       * 'numBoxes' UPDATE_GB_IMAGE commands.  Allocate all at once rather
       * than with separate commands because we need to properly deal with
       * filling the command buffer.
       */
      icmd = SVGA3D_FIFOReserve(swc,
                                SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
                                sizeof *icmd + numBoxes * sizeof *ccmd,
                                2);
      if (!icmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

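      /* SVGA3D_FIFOReserve returns a pointer to the command body; use
       * container_of to recover the enclosing header+body struct so the
       * header can be filled in as well.
       */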
      cicmd = container_of(icmd, cicmd, body);
      cicmd->header.size = sizeof *icmd;
      swc->surface_relocation(swc, &icmd->image.sid, NULL, sbuf->handle,
                              (SVGA_RELOC_WRITE |
                               SVGA_RELOC_INTERNAL |
                               SVGA_RELOC_DMA));
      icmd->image.face = 0;
      icmd->image.mipmap = 0;

      /* initialize the first UPDATE_GB_IMAGE command */
      ccmd = (struct svga_3d_update_gb_image *) &icmd[1];
      ccmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
      cmd = &ccmd->body;

   } else {
      /* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
      cmd = SVGA3D_FIFOReserve(swc,
                               SVGA_3D_CMD_UPDATE_GB_IMAGE,
                               sizeof *cmd + (numBoxes - 1) * sizeof *ccmd,
                               1);
      if (!cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      ccmd = container_of(cmd, ccmd, body);
   }

   /* Init the first UPDATE_GB_IMAGE command */
   ccmd->header.size = sizeof *cmd;
   swc->surface_relocation(swc, &cmd->image.sid, NULL, sbuf->handle,
                           SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   cmd->image.face = 0;
   cmd->image.mipmap = 0;

   /* Save pointer to the first UPDATE_GB_IMAGE command so that we can
    * fill in the box info below.
    */
   sbuf->dma.updates = ccmd;

   /*
    * Copy the relocation info, face and mipmap to all
    * subsequent commands. NOTE: For winsyses that actually
    * patch the image.sid member at flush time, this will fail
    * miserably. For those we need to add as many relocations
    * as there are copy boxes.
    */

   for (i = 1; i < numBoxes; ++i) {
      memcpy(++ccmd, sbuf->dma.updates, sizeof *ccmd);
   }

   /* Increment reference count */
   sbuf->dma.svga = svga;
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);
   SVGA_FIFOCommitAll(swc);

   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}


/**
 * Variant of SVGA3D_BufferDMA which leaves the copy box temporarily blank,
 * to be filled in just before flush.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   if (svga_have_gb_objects(svga))
      return svga_buffer_upload_gb_command(svga, sbuf);

   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if (!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, NULL, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}


/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending) {
      //debug_printf("no dma pending on buffer\n");
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);

   /*
    * Patch the DMA/update command with the final copy box.
    */
   if (svga_have_gb_objects(svga)) {
      struct svga_3d_update_gb_image *update = sbuf->dma.updates;
      assert(update);

      for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
         SVGA3dBox *box = &update->body.box;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);
      }
   }
   else {
      assert(sbuf->hwbuf);
      assert(sbuf->dma.boxes);
      SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

      for (i = 0; i < sbuf->map.num_ranges; ++i) {
         SVGA3dCopyBox *box = sbuf->dma.boxes + i;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;
         box->srcx = sbuf->map.ranges[i].start;
         box->srcy = 0;
         box->srcz = 0;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);
      }
   }

   /* Reset sbuf for next use/upload */

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);  /* remove from svga->dirty_buffers list */
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL;
#endif
   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;
   sbuf->dma.updates = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}


/**
 * Note a dirty range.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
void
svga_buffer_add_range(struct svga_buffer *sbuf,
                      unsigned start,
                      unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(end > start);

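   /* Defaults for the nearest-range search below: if there is a free
    * slot, nearest_dist = ~0 lets any existing range win and the
    * fallback is to append a new range; if all slots are taken,
    * nearest_dist = 0 means no range can test nearer, so the fallback
    * extends the last range.
    */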
   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   } else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    */

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      int left_dist;
      int right_dist;
      int dist;

      left_dist = start - sbuf->map.ranges[i].end;
      right_dist = sbuf->map.ranges[i].start - end;
      dist = MAX2(left_dist, right_dist);
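      /* When the new range and range i are disjoint, exactly one of
       * left_dist/right_dist is positive, so dist is the size of the gap
       * between them; e.g. existing [0, 8) and new [10, 12) give
       * left_dist = 2, right_dist = -12, dist = 2.  Touching or
       * overlapping ranges yield dist <= 0.
       */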

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and return.
          *
          * Note that it is not this function's task to prevent overlapping
          * ranges, as the GMR was already given so it is too late to do
          * anything.  If the ranges overlap here it must surely be because
          * PIPE_TRANSFER_UNSYNCHRONIZED was set.
          */

         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end   = MAX2(sbuf->map.ranges[i].end,   end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */

         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch up the
    * pending DMA upload and start clean.
    */

   svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */

      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   } else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */

      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start = MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end   = MAX2(sbuf->map.ranges[nearest_range].end,   end);
   }
}



/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static enum pipe_error
svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf)
{
   assert(!sbuf->user);
   if (!svga_buffer_has_hw_storage(sbuf)) {
      struct svga_screen *ss = svga_screen(sbuf->b.b.screen);
      enum pipe_error ret;
      boolean retry;
      void *map;

      assert(sbuf->swbuf);
      if (!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen),
                                          sbuf);
      if (ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
      assert(map);
      assert(!retry);
      if (!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      memcpy(map, sbuf->swbuf, sbuf->b.b.width0);
      svga_buffer_hw_storage_unmap(svga, sbuf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if (!sbuf->map.count) {
         if (sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}


/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 * This function should never get called in the guest-backed case
 * since we always have a full-sized hardware storage backing the
 * host surface.
 */
static enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);
   assert(!svga_have_gb_objects(svga));

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         uint8_t *map;
         enum pipe_error ret;

         if (offset + size > range->end)
            size = range->end - offset;

         hwbuf = sws->buffer_create(sws, alignment, usage, size);
         while (!hwbuf) {
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = sws->buffer_create(sws, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_DISCARD_RANGE);
         assert(map);
         if (map) {
            memcpy(map, (const char *) sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         ret = SVGA3D_BufferDMA(svga->swc,
                                hwbuf, sbuf->handle,
                                SVGA3D_WRITE_HOST_VRAM,
                                size, 0, offset, sbuf->dma.flags);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BufferDMA(svga->swc,
                                   hwbuf, sbuf->handle,
                                   SVGA3D_WRITE_HOST_VRAM,
                                   size, 0, offset, sbuf->dma.flags);
            assert(ret == PIPE_OK);
         }

         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}


/**
 * Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 * This function will create the host surface, and in the GB case also the
 * hardware storage. In the non-GB case, the hardware storage will be created
 * if there are mapped ranges and the data is currently in a malloc'ed buffer.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_resource *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->user);

   if (!sbuf->handle) {
      /* This call will set sbuf->handle */
      if (svga_have_gb_objects(svga)) {
         ret = svga_buffer_update_hw(svga, sbuf);
      } else {
         ret = svga_buffer_create_host_surface(ss, sbuf);
      }
      if (ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /*
          * No pending DMA upload yet, so insert a DMA upload command now.
          */

         /*
          * Migrate the data from swbuf -> hwbuf if necessary.
          */
         ret = svga_buffer_update_hw(svga, sbuf);
         if (ret == PIPE_OK) {
            /*
             * Queue a dma command.
             */

            ret = svga_buffer_upload_command(svga, sbuf);
            if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
               svga_context_flush(svga, NULL);
               ret = svga_buffer_upload_command(svga, sbuf);
               assert(ret == PIPE_OK);
            }
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it
             * into smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above. There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is already a pending DMA. Make sure it is from this context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(!sbuf->map.num_ranges || sbuf->dma.pending);

   return sbuf->handle;
}


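/**
 * Patch up and commit every pending DMA upload on the context's
 * dirty_buffers list.  Called as part of context flush.
 */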
void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while (curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
      assert(sbuf->dma.pending);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}