Subversion Repositories Kolibri OS
Rev 4358

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"

static void
intel_batchbuffer_reset(struct intel_context *intel);

void
intel_batchbuffer_init(struct intel_context *intel)
{
   intel_batchbuffer_reset(intel);

   intel->batch.cpu_map = malloc(intel->maxBatchSize);
   intel->batch.map = intel->batch.cpu_map;
}
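/* A note on the fields set up above: batch.map points at the malloc'ed
 * cpu_map, and batch.used counts DWORDs already written into it (the rest
 * of this file multiplies by 4 to get bytes).  The emit helpers live in
 * intel_batchbuffer.h; the hypothetical sketch below only illustrates how
 * a DWORD ends up in the staging map and is not the header's actual code.
 */
#if 0
static void
example_emit_dword(struct intel_context *intel, uint32_t dword)
{
   /* Append one DWORD to the CPU-side map; do_flush_locked() later
    * uploads 4 * batch.used bytes of this map into batch.bo. */
   intel->batch.map[intel->batch.used++] = dword;
}
#endif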
static void
intel_batchbuffer_reset(struct intel_context *intel)
{
   if (intel->batch.last_bo != NULL) {
      drm_intel_bo_unreference(intel->batch.last_bo);
      intel->batch.last_bo = NULL;
   }
   intel->batch.last_bo = intel->batch.bo;

   intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
                                        intel->maxBatchSize, 4096);

   intel->batch.reserved_space = BATCH_RESERVED;
   intel->batch.used = 0;
}

void
intel_batchbuffer_free(struct intel_context *intel)
{
   free(intel->batch.cpu_map);
   drm_intel_bo_unreference(intel->batch.last_bo);
   drm_intel_bo_unreference(intel->batch.bo);
}

#if 0
static void
do_batch_dump(struct intel_context *intel)
{
   struct drm_intel_decode *decode;
   struct intel_batchbuffer *batch = &intel->batch;
   int ret;

   decode = drm_intel_decode_context_alloc(intel->intelScreen->deviceID);
   if (!decode)
      return;

   ret = drm_intel_bo_map(batch->bo, false);
   if (ret == 0) {
      drm_intel_decode_set_batch_pointer(decode,
                                         batch->bo->virtual,
                                         batch->bo->offset,
                                         batch->used);
   } else {
      fprintf(stderr,
              "WARNING: failed to map batchbuffer (%s), "
              "dumping uploaded data instead.\n", strerror(ret));

      drm_intel_decode_set_batch_pointer(decode,
                                         batch->map,
                                         batch->bo->offset,
                                         batch->used);
   }

   drm_intel_decode(decode);

   drm_intel_decode_context_free(decode);

   if (ret == 0) {
      drm_intel_bo_unmap(batch->bo);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }
}
#endif
/* TODO: Push this whole function into bufmgr.
 */
static int
do_flush_locked(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = &intel->batch;
   int ret = 0;

   /* batch->used counts DWORDs, so the upload and execution sizes below
    * are 4 * used bytes. */
   ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);

   if (!intel->intelScreen->no_hw) {
      if (ret == 0) {
         if (unlikely(INTEL_DEBUG & DEBUG_AUB) && intel->vtbl.annotate_aub)
            intel->vtbl.annotate_aub(intel);
         ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
                                     I915_EXEC_RENDER);
      }
   }

//   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
//      do_batch_dump(intel);

   if (ret != 0) {
      fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
      exit(1);
   }
   intel->vtbl.new_batch(intel);

   return ret;
}
int
_intel_batchbuffer_flush(struct intel_context *intel,
                         const char *file, int line)
{
   int ret;

   if (intel->batch.used == 0)
      return 0;

   if (intel->first_post_swapbuffers_batch == NULL) {
      intel->first_post_swapbuffers_batch = intel->batch.bo;
      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              4*intel->batch.used);

   intel->batch.reserved_space = 0;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* Mark the end of the buffer. */
   intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
   if (intel->batch.used & 1) {
      /* Round batchbuffer usage to 2 DWORDs. */
      intel_batchbuffer_emit_dword(intel, MI_NOOP);
   }

   intel_upload_finish(intel);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!intel->no_batch_wrap);

   ret = do_flush_locked(intel);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      fprintf(stderr, "waiting for idle\n");
      drm_intel_bo_wait_rendering(intel->batch.bo);
   }

   /* Reset the buffer for the next batch. */
   intel_batchbuffer_reset(intel);

   return ret;
}
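/* Illustrative caller of the flush path, assuming the usual
 * intel_batchbuffer_flush(intel) wrapper in intel_batchbuffer.h that
 * forwards __FILE__/__LINE__ into _intel_batchbuffer_flush() (the
 * (file, line) parameters above suggest such a wrapper, but it is not
 * defined in this file).  Sketch only, kept out of the build.
 */
#if 0
static void
example_finish_frame(struct intel_context *intel)
{
   /* ... emit state and primitives into the batch ... */
   intel_batchbuffer_flush(intel);
}
#endif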
/* This is the only way buffers get added to the validate list.
 */
bool
intel_batchbuffer_emit_reloc(struct intel_context *intel,
                             drm_intel_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
                                 buffer, delta,
                                 read_domains, write_domain);
   assert(ret == 0);
   (void)ret;

   /*
    * Using the old buffer offset, write in what the right data would be, in case
    * the buffer doesn't move and we can short-circuit the relocation processing
    * in the kernel.
    */
   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);

   return true;
}

bool
intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                    drm_intel_bo *buffer,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t delta)
{
   int ret;

   ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
                                       buffer, delta,
                                       read_domains, write_domain);
   assert(ret == 0);
   (void)ret;

   /*
    * Using the old buffer offset, write in what the right data would
    * be, in case the buffer doesn't move and we can short-circuit the
    * relocation processing in the kernel.
    */
   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);

   return true;
}
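/* Hypothetical example of emitting a relocation for a buffer the GPU will
 * only read (callers normally reach these functions through OUT_RELOC()
 * style macros in intel_batchbuffer.h; the macro names are assumed here,
 * and the domain flag comes from libdrm's i915_drm.h).
 */
#if 0
static void
example_emit_vertex_buffer_reloc(struct intel_context *intel,
                                 drm_intel_bo *vbo, uint32_t offset)
{
   /* Read-only relocation: read through the vertex domain, no write domain. */
   intel_batchbuffer_emit_reloc(intel, vbo,
                                I915_GEM_DOMAIN_VERTEX, 0, offset);
}
#endif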
/* Copy a block of pre-packed DWORDs into the batch.  The size must be a
 * multiple of 4 bytes; space is reserved before the copy.
 */
void
intel_batchbuffer_data(struct intel_context *intel,
                       const void *data, GLuint bytes)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(intel, bytes);
   __memcpy(intel->batch.map + intel->batch.used, data, bytes);
   intel->batch.used += bytes >> 2;
}
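/* Example use of intel_batchbuffer_data(): copying a small, pre-packed
 * block of DWORDs in one call instead of emitting them one by one.  The
 * helper name is hypothetical; MI_FLUSH and MI_NOOP come from intel_reg.h.
 */
#if 0
static void
example_emit_packed_dwords(struct intel_context *intel)
{
   static const uint32_t packet[2] = { MI_FLUSH, MI_NOOP };

   /* sizeof(packet) is a multiple of 4, as the assert above requires. */
   intel_batchbuffer_data(intel, packet, sizeof(packet));
}
#endif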
/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * render appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
{
   BEGIN_BATCH(1);
   OUT_BATCH(MI_FLUSH);
   ADVANCE_BATCH();
}
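/* BEGIN_BATCH()/OUT_BATCH()/ADVANCE_BATCH() are macros from
 * intel_batchbuffer.h.  Under their usual definitions (assumed here, not
 * shown in this file), the one-DWORD flush above reduces to roughly the
 * sketch below: reserve space, then append the MI_FLUSH command.
 */
#if 0
static void
example_emit_mi_flush_expanded(struct intel_context *intel)
{
   intel_batchbuffer_require_space(intel, 1 * 4);   /* BEGIN_BATCH(1) */
   intel_batchbuffer_emit_dword(intel, MI_FLUSH);   /* OUT_BATCH(MI_FLUSH) */
   /* ADVANCE_BATCH() typically just sanity-checks the emitted DWORD count. */
}
#endif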
  268.