/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/errno.h>

#include "main/condrender.h"
#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/state.h"
#include "brw_context.h"
#include "brw_draw.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "brw_defines.h"


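/* Emit the GPGPU_WALKER packet that launches a grid of
 * num_groups[0] x num_groups[1] x num_groups[2] thread groups, plus the
 * MEDIA_STATE_FLUSH that follows it.
 */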
static void
brw_emit_gpgpu_walker(struct brw_context *brw, const GLuint *num_groups)
{
   const struct brw_cs_prog_data *prog_data = brw->cs.prog_data;

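   /* A work group executes local_size[0] * local_size[1] * local_size[2]
    * invocations, packed into SIMD-sized hardware threads, so each group
    * needs ceil(group_size / simd_size) threads.
    */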
   const unsigned simd_size = prog_data->simd_size;
   unsigned group_size = prog_data->local_size[0] *
      prog_data->local_size[1] * prog_data->local_size[2];
   unsigned thread_width_max =
      (group_size + simd_size - 1) / simd_size;

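   /* If group_size is not a multiple of simd_size, the last thread in each
    * group has fewer live invocations than channels; shrink the right
    * execution mask so the excess channels stay disabled.
    */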
   /* All-ones mask for simd_size channels, written as a right shift because
    * 1u << 32 would be undefined for SIMD32.
    */
   uint32_t right_mask = 0xffffffffu >> (32 - simd_size);
   const unsigned right_non_aligned = group_size & (simd_size - 1);
   if (right_non_aligned != 0)
      right_mask >>= (simd_size - right_non_aligned);

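   /* The gen7 GPGPU_WALKER packet is 11 dwords; gen8 inserts the two
    * indirect-data dwords and two MBZ dwords, for 15 in total.
    */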
   uint32_t dwords = brw->gen < 8 ? 11 : 15;
   BEGIN_BATCH(dwords);
   OUT_BATCH(GPGPU_WALKER << 16 | (dwords - 2));
   OUT_BATCH(0);
   if (brw->gen >= 8) {
      OUT_BATCH(0);                     /* Indirect Data Length */
      OUT_BATCH(0);                     /* Indirect Data Start Address */
   }
   assert(thread_width_max <= brw->max_cs_threads);
   OUT_BATCH(SET_FIELD(simd_size / 16, GPGPU_WALKER_SIMD_SIZE) |
             SET_FIELD(thread_width_max - 1, GPGPU_WALKER_THREAD_WIDTH_MAX));
   OUT_BATCH(0);                        /* Thread Group ID Starting X */
   if (brw->gen >= 8)
      OUT_BATCH(0);                     /* MBZ */
   OUT_BATCH(num_groups[0]);            /* Thread Group ID X Dimension */
   OUT_BATCH(0);                        /* Thread Group ID Starting Y */
   if (brw->gen >= 8)
      OUT_BATCH(0);                     /* MBZ */
   OUT_BATCH(num_groups[1]);            /* Thread Group ID Y Dimension */
   OUT_BATCH(0);                        /* Thread Group ID Starting/Resume Z */
   OUT_BATCH(num_groups[2]);            /* Thread Group ID Z Dimension */
   OUT_BATCH(right_mask);               /* Right Execution Mask */
   OUT_BATCH(0xffffffff);               /* Bottom Execution Mask */
   ADVANCE_BATCH();

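   /* Follow the walker with a MEDIA_STATE_FLUSH to flush the media pipeline
    * before any later commands.
    */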
   BEGIN_BATCH(2);
   OUT_BATCH(MEDIA_STATE_FLUSH << 16 | (2 - 2));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}


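/* Driver hook for glDispatchCompute(): validates GL state, reserves batch
 * space, uploads the compute pipeline state, and emits the walker, retrying
 * once on a fresh batch if the kernel would reject the buffer list for lack
 * of aperture space.
 */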
static void
brw_dispatch_compute(struct gl_context *ctx, const GLuint *num_groups)
{
   struct brw_context *brw = brw_context(ctx);
   int estimated_buffer_space_needed;
   bool fail_next = false;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (ctx->NewState)
      _mesa_update_state(ctx);

   brw_validate_textures(brw);

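   /* Rough worst-case estimate of the batch space one dispatch may consume:
    * fixed command overhead, per-texture-unit sampler state and default
    * (border) color, push constants, and padding.
    */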
   const int sampler_state_size = 16; /* 16 bytes */
   estimated_buffer_space_needed = 512; /* batchbuffer commands */
   estimated_buffer_space_needed += (BRW_MAX_TEX_UNIT *
                                     (sampler_state_size +
                                      sizeof(struct gen5_sampler_default_color)));
   estimated_buffer_space_needed += 1024; /* push constants */
   estimated_buffer_space_needed += 512; /* misc. pad */

   /* Flush the batch if it's approaching full, so that we don't wrap while
    * we've got validated state that needs to be in the same batch as the
    * primitives.
    */
   intel_batchbuffer_require_space(brw, estimated_buffer_space_needed,
                                   RENDER_RING);
   intel_batchbuffer_save_state(brw);

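   /* Emit the dispatch with batch wrapping disabled.  If the resulting
    * buffer list would not fit in the aperture, roll back to the saved batch
    * state, flush, and retry once on an empty batch before giving up.
    */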
 retry:
   brw->no_batch_wrap = true;
   brw_upload_compute_state(brw);

   brw_emit_gpgpu_walker(brw, num_groups);

   brw->no_batch_wrap = false;

   if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
      if (!fail_next) {
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         fail_next = true;
         goto retry;
      } else {
         if (intel_batchbuffer_flush(brw) == -ENOSPC) {
            static bool warned = false;

            if (!warned) {
               fprintf(stderr, "i965: Single compute shader dispatch "
                       "exceeded available aperture space\n");
               warned = true;
            }
         }
      }
   }

   /* Now that we know we haven't run out of aperture space, we can safely
    * reset the dirty bits.
    */
   brw_compute_state_finished(brw);

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_state_cache_check_size(brw);

   /* Note: since compute shaders can't write to framebuffers, there's no need
    * to call brw_postdraw_set_buffers_need_resolve().
    */
}


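/* Core Mesa routes glDispatchCompute() through this table; a sketch of the
 * call path (core Mesa's own validation happens outside this file):
 *
 *    glDispatchCompute(x, y, z)
 *       -> ctx->Driver.DispatchCompute(ctx, num_groups)   // brw_dispatch_compute
 */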
void
brw_init_compute_functions(struct dd_function_table *functions)
{
   functions->DispatchCompute = brw_dispatch_compute;
}