/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/macros.h"
#include "util/register_allocate.h"
#include "brw_vec4.h"
#include "brw_vs.h"
#include "brw_cfg.h"

using namespace brw;

namespace brw {

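/* Rewrite a virtual GRF reference to the hardware register it was assigned:
 * fold the intra-register offset into the base register number and clear the
 * offset.
 */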
static void
assign(unsigned int *reg_hw_locations, backend_reg *reg)
{
   if (reg->file == GRF) {
      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset;
      reg->reg_offset = 0;
   }
}

bool
vec4_visitor::reg_allocate_trivial()
{
   unsigned int hw_reg_mapping[this->alloc.count];
   bool virtual_grf_used[this->alloc.count];
   int next;

   /* Calculate which virtual GRFs are actually in use after whatever
    * optimization passes have occurred.
    */
   for (unsigned i = 0; i < this->alloc.count; i++) {
      virtual_grf_used[i] = false;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == GRF)
         virtual_grf_used[inst->dst.reg] = true;

      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF)
            virtual_grf_used[inst->src[i].reg] = true;
      }
   }

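   /* Pack the used VGRFs one after another, starting right after the payload
    * registers.  VGRF 0 is mapped unconditionally so that `next` starts from
    * a defined value.
    */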
   hw_reg_mapping[0] = this->first_non_payload_grf;
   next = hw_reg_mapping[0] + this->alloc.sizes[0];
   for (unsigned i = 1; i < this->alloc.count; i++) {
      if (virtual_grf_used[i]) {
         hw_reg_mapping[i] = next;
         next += this->alloc.sizes[i];
      }
   }
   prog_data->total_grf = next;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   if (prog_data->total_grf > max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           prog_data->total_grf, max_grf);
      return false;
   }

   return true;
}

extern "C" void
brw_vec4_alloc_reg_set(struct brw_compiler *compiler)
{
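   /* Gen7+ has no MRFs; message payloads are built in the GRFs from
    * GEN7_MRF_HACK_START upward, so those registers are off-limits to the
    * allocator.
    */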
   int base_reg_count =
      compiler->devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   /* After running split_virtual_grfs(), almost all VGRFs will be of size 1.
    * SEND-from-GRF sources cannot be split, so we also need classes for each
    * potential message length.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];

   for (int i = 0; i < class_count; i++)
      class_sizes[i] = i + 1;

   /* Compute the total number of registers across all classes: a class whose
    * registers each span n GRFs has one ra register for every possible
    * starting GRF, i.e. base_reg_count - (n - 1) of them.
    */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      ra_reg_count += base_reg_count - (class_sizes[i] - 1);
   }

   ralloc_free(compiler->vec4_reg_set.ra_reg_to_grf);
   compiler->vec4_reg_set.ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count);
   ralloc_free(compiler->vec4_reg_set.regs);
   compiler->vec4_reg_set.regs = ra_alloc_reg_set(compiler, ra_reg_count);
   if (compiler->devinfo->gen >= 6)
      ra_set_allocate_round_robin(compiler->vec4_reg_set.regs);
   ralloc_free(compiler->vec4_reg_set.classes);
   compiler->vec4_reg_set.classes = ralloc_array(compiler, int, class_count);

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   unsigned *q_values[MAX_VGRF_SIZE];
   for (int i = 0; i < class_count; i++) {
      int class_reg_count = base_reg_count - (class_sizes[i] - 1);
      compiler->vec4_reg_set.classes[i] = ra_alloc_reg_class(compiler->vec4_reg_set.regs);

      q_values[i] = new unsigned[MAX_VGRF_SIZE];

      for (int j = 0; j < class_reg_count; j++) {
         ra_class_add_reg(compiler->vec4_reg_set.regs, compiler->vec4_reg_set.classes[i], reg);

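         /* ra register `reg` stands for class_sizes[i] contiguous GRFs
          * starting at GRF j; record the base GRF so allocation results can
          * be mapped back to hardware register numbers.
          */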
         compiler->vec4_reg_set.ra_reg_to_grf[reg] = j;

         for (int base_reg = j;
              base_reg < j + class_sizes[i];
              base_reg++) {
            ra_add_transitive_reg_conflict(compiler->vec4_reg_set.regs, base_reg, reg);
         }

         reg++;
      }

      for (int j = 0; j < class_count; j++) {
         /* Calculate the q values manually because the algorithm used by
          * ra_set_finalize() to do it has higher complexity affecting the
          * start-up time of some applications.  q(i, j) is just the maximum
          * number of registers from class i a register from class j can
          * conflict with.
          */
         q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
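         /* For example, a size-3 register conflicts with any size-2 register
          * whose base falls within a 2 + 3 - 1 = 4 GRF window, so q between
          * the size-2 and size-3 classes is 4.
          */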
      }
   }
   assert(reg == ra_reg_count);

   ra_set_finalize(compiler->vec4_reg_set.regs, q_values);

   for (int i = 0; i < MAX_VGRF_SIZE; i++)
      delete[] q_values[i];
}

void
vec4_visitor::setup_payload_interference(struct ra_graph *g,
                                         int first_payload_node,
                                         int reg_node_count)
{
   int payload_node_count = this->first_non_payload_grf;

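   /* Payload registers hold values that are in place before the shader
    * starts executing (e.g. push constants and, for the VS, the incoming
    * vertex attributes), so each one is pinned to its physical register.
    */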
   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload reg node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical register classes, which
       * would just be silly.
       */
      ra_set_node_reg(g, first_payload_node + i, i);

      /* For now, just mark each payload node as interfering with every other
       * node to be allocated.
       */
      for (int j = 0; j < reg_node_count; j++) {
         ra_add_node_interference(g, first_payload_node + i, j);
      }
   }
}

bool
vec4_visitor::reg_allocate()
{
   struct brw_compiler *compiler = brw->intelScreen->compiler;
   unsigned int hw_reg_mapping[alloc.count];
   int payload_reg_count = this->first_non_payload_grf;

   /* Using the trivial allocator can be useful in debugging undefined
    * register access as a result of broken optimization passes.
    */
   if (0)
      return reg_allocate_trivial();

   calculate_live_intervals();

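   /* Build an interference graph with one node per VGRF, followed by one
    * node per payload register.
    */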
   int node_count = alloc.count;
   int first_payload_node = node_count;
   node_count += payload_reg_count;
   struct ra_graph *g =
      ra_alloc_interference_graph(compiler->vec4_reg_set.regs, node_count);

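   /* Put each VGRF node in the register class matching its size, and add an
    * interference edge between any two VGRFs whose live ranges overlap.
    */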
   for (unsigned i = 0; i < alloc.count; i++) {
      int size = this->alloc.sizes[i];
      assert(size >= 1 && size <= MAX_VGRF_SIZE);
      ra_set_node_class(g, i, compiler->vec4_reg_set.classes[size - 1]);

      for (unsigned j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   setup_payload_interference(g, first_payload_node, node_count);

   if (!ra_allocate(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);
      if (this->no_spills) {
         fail("Failure to register allocate.  Reduce number of live "
              "values to avoid this.");
      } else if (reg == -1) {
         fail("no register to spill\n");
      } else {
         spill_reg(reg);
      }
      ralloc_free(g);
      return false;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   prog_data->total_grf = payload_reg_count;
   for (unsigned i = 0; i < alloc.count; i++) {
      int reg = ra_get_node_reg(g, i);

      hw_reg_mapping[i] = compiler->vec4_reg_set.ra_reg_to_grf[reg];
      prog_data->total_grf = MAX2(prog_data->total_grf,
                                  hw_reg_mapping[i] + alloc.sizes[i]);
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   ralloc_free(g);

   return true;
}

void
vec4_visitor::evaluate_spill_costs(float *spill_costs, bool *no_spill)
{
   float loop_scale = 1.0;

   for (unsigned i = 0; i < this->alloc.count; i++) {
      spill_costs[i] = 0.0;
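      /* Only size-1 VGRFs are candidates for spilling; spill_reg() asserts
       * that the spilled VGRF occupies a single register.
       */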
      no_spill[i] = alloc.sizes[i] != 1;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
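   /* With that guess, a reference nested two loops deep contributes
    * 10 * 10 = 100 to its VGRF's spill cost.
    */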
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            spill_costs[inst->src[i].reg] += loop_scale;
            if (inst->src[i].reladdr)
               no_spill[inst->src[i].reg] = true;
         }
      }

      if (inst->dst.file == GRF) {
         spill_costs[inst->dst.reg] += loop_scale;
         if (inst->dst.reladdr)
            no_spill[inst->dst.reg] = true;
      }

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF)
               no_spill[inst->src[i].reg] = true;
         }
         if (inst->dst.file == GRF)
            no_spill[inst->dst.reg] = true;
         break;

      default:
         break;
      }
   }
}

int
vec4_visitor::choose_spill_reg(struct ra_graph *g)
{
   float spill_costs[this->alloc.count];
   bool no_spill[this->alloc.count];

   evaluate_spill_costs(spill_costs, no_spill);

   for (unsigned i = 0; i < this->alloc.count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}

void
vec4_visitor::spill_reg(int spill_reg_nr)
{
   assert(alloc.sizes[spill_reg_nr] == 1);
   unsigned int spill_offset = c->last_scratch++;

   /* Generate spill/unspill instructions for the objects being spilled. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF && inst->src[i].reg == spill_reg_nr) {
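            /* Unspill: allocate a fresh temporary VGRF, emit a scratch read
             * of the spilled value into it ahead of `inst`, and rewrite this
             * source to read from the temporary.
             */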
            src_reg spill_reg = inst->src[i];
            inst->src[i].reg = alloc.allocate(1);
            dst_reg temp = dst_reg(inst->src[i]);

            emit_scratch_read(block, inst, temp, spill_reg, spill_offset);
         }
      }

      if (inst->dst.file == GRF && inst->dst.reg == spill_reg_nr) {
         emit_scratch_write(block, inst, spill_offset);
      }
   }

   invalidate_live_intervals();
}

} /* namespace brw */