/*
 * Copyright (c) 2014 - 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "util/ralloc.h"
#include "brw_context.h"
#include "brw_cs.h"
#include "brw_fs.h"
#include "brw_eu.h"
#include "brw_wm.h"
#include "intel_mipmap_tree.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"

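/* Compare two brw_cs_prog_data structures: the shared base structure first,
 * then the CS-specific fields that follow it.  The program cache uses this
 * to decide whether a cached compute shader program can be reused.
 */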
extern "C"
bool
brw_cs_prog_data_compare(const void *in_a, const void *in_b)
{
   const struct brw_cs_prog_data *a =
      (const struct brw_cs_prog_data *)in_a;
   const struct brw_cs_prog_data *b =
      (const struct brw_cs_prog_data *)in_b;

   /* Compare the base structure. */
   if (!brw_stage_prog_data_compare(&a->base, &b->base))
      return false;

   /* Compare the rest of the structure. */
   const unsigned offset = sizeof(struct brw_stage_prog_data);
   if (memcmp(((char *) a) + offset, ((char *) b) + offset,
              sizeof(struct brw_cs_prog_data) - offset))
      return false;

   return true;
}


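/* Compile the compute shader to native code: always run a SIMD8 compile,
 * attempt SIMD16 when the workgroup size and debug flags allow it, and
 * generate assembly from whichever CFG was kept.
 */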
static const unsigned *
brw_cs_emit(struct brw_context *brw,
            void *mem_ctx,
            const struct brw_cs_prog_key *key,
            struct brw_cs_prog_data *prog_data,
            struct gl_compute_program *cp,
            struct gl_shader_program *prog,
            unsigned *final_assembly_size)
{
   bool start_busy = false;
   double start_time = 0;

   if (unlikely(brw->perf_debug)) {
      start_busy = (brw->batch.last_bo &&
                    drm_intel_bo_busy(brw->batch.last_bo));
      start_time = get_time();
   }

   struct brw_shader *shader =
      (struct brw_shader *) prog->_LinkedShaders[MESA_SHADER_COMPUTE];

   if (unlikely(INTEL_DEBUG & DEBUG_CS))
      brw_dump_ir("compute", prog, &shader->base, &cp->Base);

   prog_data->local_size[0] = cp->LocalSize[0];
   prog_data->local_size[1] = cp->LocalSize[1];
   prog_data->local_size[2] = cp->LocalSize[2];
   int local_workgroup_size =
      cp->LocalSize[0] * cp->LocalSize[1] * cp->LocalSize[2];

   cfg_t *cfg = NULL;
   const char *fail_msg = NULL;

   /* Now the main event: Visit the shader IR and generate our CS IR for it.
    */
   fs_visitor v8(brw, mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
                 &cp->Base, 8);
   if (!v8.run_cs()) {
      fail_msg = v8.fail_msg;
   } else if (local_workgroup_size <= 8 * brw->max_cs_threads) {
      cfg = v8.cfg;
      prog_data->simd_size = 8;
   }

   fs_visitor v16(brw, mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
                  &cp->Base, 16);
   if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
       !fail_msg && !v8.simd16_unsupported &&
       local_workgroup_size <= 16 * brw->max_cs_threads) {
      /* Try a SIMD16 compile */
      v16.import_uniforms(&v8);
      if (!v16.run_cs()) {
         perf_debug("SIMD16 shader failed to compile: %s", v16.fail_msg);
         if (!cfg) {
            fail_msg =
               "Couldn't generate SIMD16 program and not "
               "enough threads for SIMD8";
         }
      } else {
         cfg = v16.cfg;
         prog_data->simd_size = 16;
      }
   }

   if (unlikely(cfg == NULL)) {
      assert(fail_msg);
      prog->LinkStatus = false;
      ralloc_strcat(&prog->InfoLog, fail_msg);
      _mesa_problem(NULL, "Failed to compile compute shader: %s\n",
                    fail_msg);
      return NULL;
   }

   fs_generator g(brw, mem_ctx, (void*) key, &prog_data->base, &cp->Base,
                  v8.promoted_constants, v8.runtime_check_aads_emit, "CS");
   if (INTEL_DEBUG & DEBUG_CS) {
      char *name = ralloc_asprintf(mem_ctx, "%s compute shader %d",
                                   prog->Label ? prog->Label : "unnamed",
                                   prog->Name);
      g.enable_debug(name);
   }

   g.generate_code(cfg, prog_data->simd_size);

   if (unlikely(brw->perf_debug) && shader) {
      if (shader->compiled_once) {
         _mesa_problem(&brw->ctx, "CS programs shouldn't need recompiles");
      }
      shader->compiled_once = true;

      if (start_busy && !drm_intel_bo_busy(brw->batch.last_bo)) {
         perf_debug("CS compile took %.03f ms and stalled the GPU\n",
                    (get_time() - start_time) * 1000);
      }
   }

   return g.get_assembly(final_assembly_size);
}

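/* Compile the compute program for the given key and upload the resulting
 * assembly and prog_data to the program cache.
 */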
static bool
brw_codegen_cs_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_compute_program *cp,
                    struct brw_cs_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   const GLuint *program;
   void *mem_ctx = ralloc_context(NULL);
   GLuint program_size;
   struct brw_cs_prog_data prog_data;

   struct gl_shader *cs = prog->_LinkedShaders[MESA_SHADER_COMPUTE];
   assert (cs);

   memset(&prog_data, 0, sizeof(prog_data));

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count = cs->num_uniform_components;

   /* The backend also sometimes adds params for texture size. */
   param_count += 2 * ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;
   prog_data.base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data.base.nr_params = param_count;

   program = brw_cs_emit(brw, mem_ctx, key, &prog_data,
                         &cp->program, prog, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   if (prog_data.base.total_scratch) {
      brw_get_scratch_bo(brw, &brw->cs.base.scratch_bo,
                         prog_data.base.total_scratch * brw->max_cs_threads);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_CS))
      fprintf(stderr, "\n");

   brw_upload_cache(&brw->cache, BRW_CACHE_CS_PROG,
                    key, sizeof(*key),
                    program, program_size,
                    &prog_data, sizeof(prog_data),
                    &brw->cs.base.prog_offset, &brw->cs.prog_data);
   ralloc_free(mem_ctx);

   return true;
}


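/* Build the cache key for the currently bound compute program. */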
static void
brw_cs_populate_key(struct brw_context *brw, struct brw_cs_prog_key *key)
{
   /* BRW_NEW_COMPUTE_PROGRAM */
   const struct brw_compute_program *cp =
      (struct brw_compute_program *) brw->compute_program;

   memset(key, 0, sizeof(*key));

   /* The unique compute program ID */
   key->program_string_id = cp->id;
}


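/* Upload the current compute program, compiling it first if its key is not
 * already in the program cache.
 */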
extern "C"
void
brw_upload_cs_prog(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_cs_prog_key key;
   struct brw_compute_program *cp = (struct brw_compute_program *)
      brw->compute_program;

   if (!cp)
      return;

   if (!brw_state_dirty(brw, 0, BRW_NEW_COMPUTE_PROGRAM))
      return;

   brw_cs_populate_key(brw, &key);

   if (!brw_search_cache(&brw->cache, BRW_CACHE_CS_PROG,
                         &key, sizeof(key),
                         &brw->cs.base.prog_offset, &brw->cs.prog_data)) {
      bool success =
         brw_codegen_cs_prog(brw,
                             ctx->Shader.CurrentProgram[MESA_SHADER_COMPUTE],
                             cp, &key);
      (void) success;
      assert(success);
   }
   brw->cs.base.prog_data = &brw->cs.prog_data->base;
}


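/* Compile the compute shader with a default key at link/precompile time;
 * the previously bound prog_offset and prog_data are restored afterwards
 * so the current state is left unchanged.
 */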
extern "C" bool
brw_cs_precompile(struct gl_context *ctx,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_cs_prog_key key;

   struct gl_compute_program *cp = (struct gl_compute_program *) prog;
   struct brw_compute_program *bcp = brw_compute_program(cp);

   memset(&key, 0, sizeof(key));
   key.program_string_id = bcp->id;

   brw_setup_tex_for_precompile(brw, &key.tex, prog);

   uint32_t old_prog_offset = brw->cs.base.prog_offset;
   struct brw_cs_prog_data *old_prog_data = brw->cs.prog_data;

   bool success = brw_codegen_cs_prog(brw, shader_prog, bcp, &key);

   brw->cs.base.prog_offset = old_prog_offset;
   brw->cs.prog_data = old_prog_data;

   return success;
}


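/* Emit MEDIA_VFE_STATE and MEDIA_INTERFACE_DESCRIPTOR_LOAD, upload the
 * interface descriptor and binding table, and point the hardware at the
 * compiled CS kernel.
 */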
static void
brw_upload_cs_state(struct brw_context *brw)
{
   if (!brw->cs.prog_data)
      return;

   uint32_t offset;
   uint32_t *desc = (uint32_t*) brw_state_batch(brw, AUB_TRACE_SURFACE_STATE,
                                                8 * 4, 64, &offset);
   struct brw_stage_state *stage_state = &brw->cs.base;
   struct brw_cs_prog_data *cs_prog_data = brw->cs.prog_data;
   struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
      brw->vtbl.emit_buffer_surface_state(
         brw, &stage_state->surf_offset[
                 prog_data->binding_table.shader_time_start],
         brw->shader_time.bo, 0, BRW_SURFACEFORMAT_RAW,
         brw->shader_time.bo->size, 1, true);
   }

   uint32_t *bind = (uint32_t*) brw_state_batch(brw, AUB_TRACE_BINDING_TABLE,
                                            prog_data->binding_table.size_bytes,
                                            32, &stage_state->bind_bo_offset);

   uint32_t dwords = brw->gen < 8 ? 8 : 9;
   BEGIN_BATCH(dwords);
   OUT_BATCH(MEDIA_VFE_STATE << 16 | (dwords - 2));

   if (prog_data->total_scratch) {
      if (brw->gen >= 8)
         OUT_RELOC64(stage_state->scratch_bo,
                     I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                     ffs(prog_data->total_scratch) - 11);
      else
         OUT_RELOC(stage_state->scratch_bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   ffs(prog_data->total_scratch) - 11);
   } else {
      OUT_BATCH(0);
      if (brw->gen >= 8)
         OUT_BATCH(0);
   }

   const uint32_t vfe_num_urb_entries = brw->gen >= 8 ? 2 : 0;
   const uint32_t vfe_gpgpu_mode =
      brw->gen == 7 ? SET_FIELD(1, GEN7_MEDIA_VFE_STATE_GPGPU_MODE) : 0;
   OUT_BATCH(SET_FIELD(brw->max_cs_threads - 1, MEDIA_VFE_STATE_MAX_THREADS) |
             SET_FIELD(vfe_num_urb_entries, MEDIA_VFE_STATE_URB_ENTRIES) |
             SET_FIELD(1, MEDIA_VFE_STATE_RESET_GTW_TIMER) |
             SET_FIELD(1, MEDIA_VFE_STATE_BYPASS_GTW) |
             vfe_gpgpu_mode);

   OUT_BATCH(0);
   const uint32_t vfe_urb_allocation = brw->gen >= 8 ? 2 : 0;
   OUT_BATCH(SET_FIELD(vfe_urb_allocation, MEDIA_VFE_STATE_URB_ALLOC));
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();

   /* BRW_NEW_SURFACES and BRW_NEW_*_CONSTBUF */
   memcpy(bind, stage_state->surf_offset,
          prog_data->binding_table.size_bytes);

   memset(desc, 0, 8 * 4);

   int dw = 0;
   desc[dw++] = brw->cs.base.prog_offset;
   if (brw->gen >= 8)
      desc[dw++] = 0; /* Kernel Start Pointer High */
   desc[dw++] = 0;
   desc[dw++] = 0;
   desc[dw++] = stage_state->bind_bo_offset;

   BEGIN_BATCH(4);
   OUT_BATCH(MEDIA_INTERFACE_DESCRIPTOR_LOAD << 16 | (4 - 2));
   OUT_BATCH(0);
   OUT_BATCH(8 * 4);
   OUT_BATCH(offset);
   ADVANCE_BATCH();
}


extern "C"
const struct brw_tracked_state brw_cs_state = {
   /* Designated initializers aren't valid C++, so they are kept
    * in comments for documentation purposes. */
   /* .dirty = */{
      /* .mesa = */ 0,
      /* .brw = */  BRW_NEW_CS_PROG_DATA,
   },
   /* .emit = */ brw_upload_cs_state
};