/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "glsl_parser_extras.h"
#include "ir.h"
#include "ir_uniform.h"
#include "linker.h"
#include "program/hash_table.h"
#include "main/macros.h"

namespace {
   /*
    * Atomic counter as seen by the program.
    */
   struct active_atomic_counter {
      unsigned id;
      ir_variable *var;
   };

   /*
    * Atomic counter buffer referenced by the program.  There is a one
    * to one correspondence between these and the objects that can be
    * queried using glGetActiveAtomicCounterBufferiv().
    */
   struct active_atomic_buffer {
      active_atomic_buffer()
         : counters(0), num_counters(0), stage_references(), size(0)
      {}

      ~active_atomic_buffer()
      {
         free(counters);
      }

      /* Grow the counter array by one element and append the given
       * counter, reporting an out-of-memory error on failure.
       */
      void push_back(unsigned id, ir_variable *var)
      {
         active_atomic_counter *new_counters;

         new_counters = (active_atomic_counter *)
            realloc(counters, sizeof(active_atomic_counter) *
                    (num_counters + 1));

         if (new_counters == NULL) {
            _mesa_error_no_memory(__func__);
            return;
         }

         counters = new_counters;
         counters[num_counters].id = id;
         counters[num_counters].var = var;
         num_counters++;
      }

      active_atomic_counter *counters;
      unsigned num_counters;
      unsigned stage_references[MESA_SHADER_STAGES];
      unsigned size;
   };

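   /*
    * qsort() comparator that orders active counters by their byte
    * offset within the buffer.
    */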
   int
   cmp_actives(const void *a, const void *b)
   {
      const active_atomic_counter *const first = (active_atomic_counter *) a;
      const active_atomic_counter *const second = (active_atomic_counter *) b;

      return int(first->var->data.atomic.offset) - int(second->var->data.atomic.offset);
   }

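   /*
    * Return true if the [offset, offset + size) byte ranges of the two
    * counters intersect.
    */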
   bool
   check_atomic_counters_overlap(const ir_variable *x, const ir_variable *y)
   {
      return ((x->data.atomic.offset >= y->data.atomic.offset &&
               x->data.atomic.offset < y->data.atomic.offset + y->type->atomic_size()) ||
              (y->data.atomic.offset >= x->data.atomic.offset &&
               y->data.atomic.offset < x->data.atomic.offset + x->type->atomic_size()));
   }

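   /*
    * Build a table of the atomic counter buffers referenced by the
    * program, indexed by binding point, and store the number of
    * distinct buffers found in *num_buffers.  The binding and offset
    * of each counter come from its GLSL declaration, e.g.:
    *
    *    layout(binding = 2, offset = 4) uniform atomic_uint counter;
    *
    * The caller owns the returned array and must release it with
    * delete [].
    */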
   active_atomic_buffer *
   find_active_atomic_counters(struct gl_context *ctx,
                               struct gl_shader_program *prog,
                               unsigned *num_buffers)
   {
      active_atomic_buffer *const buffers =
         new active_atomic_buffer[ctx->Const.MaxAtomicBufferBindings];

      *num_buffers = 0;

      for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
         struct gl_shader *sh = prog->_LinkedShaders[i];
         if (sh == NULL)
            continue;

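         /* Walk the stage's IR and record every variable whose type
          * contains an atomic counter.
          */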
         foreach_in_list(ir_instruction, node, sh->ir) {
            ir_variable *var = node->as_variable();

            if (var && var->type->contains_atomic()) {
               unsigned id = 0;
               bool found = prog->UniformHash->get(id, var->name);
               assert(found);
               /* The cast keeps 'found' referenced when assertions are
                * compiled out.
                */
               (void) found;
               active_atomic_buffer *buf = &buffers[var->data.binding];

               /* If this is the first time the buffer is used, increment
                * the count of buffers used.
                */
               if (buf->size == 0)
                  (*num_buffers)++;

               buf->push_back(id, var);

               buf->stage_references[i]++;
               buf->size = MAX2(buf->size, var->data.atomic.offset +
                                var->type->atomic_size());
            }
         }
      }

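      /* Sort the counters sharing each binding point by offset and
       * make sure no two distinct counters occupy the same bytes.
       */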
      for (unsigned i = 0; i < ctx->Const.MaxAtomicBufferBindings; i++) {
         if (buffers[i].size == 0)
            continue;

         qsort(buffers[i].counters, buffers[i].num_counters,
               sizeof(active_atomic_counter),
               cmp_actives);

         for (unsigned j = 1; j < buffers[i].num_counters; j++) {
            /* If an overlapping counter is found, it must be a reference
             * to the same counter from a different shader stage.
             */
            if (check_atomic_counters_overlap(buffers[i].counters[j-1].var,
                                              buffers[i].counters[j].var)
                && strcmp(buffers[i].counters[j-1].var->name,
                          buffers[i].counters[j].var->name) != 0) {
               linker_error(prog, "Atomic counter %s declared at offset %d "
                            "which is already in use.",
                            buffers[i].counters[j].var->name,
                            buffers[i].counters[j].var->data.atomic.offset);
            }
         }
      }
      return buffers;
   }
}

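/*
 * Allocate and fill prog->AtomicBuffers with one gl_active_atomic_buffer
 * per binding point in use, and point each counter uniform's storage at
 * its buffer index, offset and array stride.
 */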
void
link_assign_atomic_counter_resources(struct gl_context *ctx,
                                     struct gl_shader_program *prog)
{
   unsigned num_buffers;
   active_atomic_buffer *abs =
      find_active_atomic_counters(ctx, prog, &num_buffers);

   prog->AtomicBuffers = rzalloc_array(prog, gl_active_atomic_buffer,
                                       num_buffers);
   prog->NumAtomicBuffers = num_buffers;

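   /* Walk every possible binding point, packing the used ones into
    * consecutive entries of prog->AtomicBuffers.
    */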
   unsigned i = 0;
   for (unsigned binding = 0;
        binding < ctx->Const.MaxAtomicBufferBindings;
        binding++) {

      /* If the binding was not used, skip.
       */
      if (abs[binding].size == 0)
         continue;

      active_atomic_buffer &ab = abs[binding];
      gl_active_atomic_buffer &mab = prog->AtomicBuffers[i];

      /* Assign buffer-specific fields. */
      mab.Binding = binding;
      mab.MinimumSize = ab.size;
      mab.Uniforms = rzalloc_array(prog->AtomicBuffers, GLuint,
                                   ab.num_counters);
      mab.NumUniforms = ab.num_counters;

      /* Assign counter-specific fields. */
      for (unsigned j = 0; j < ab.num_counters; j++) {
         ir_variable *const var = ab.counters[j].var;
         const unsigned id = ab.counters[j].id;
         gl_uniform_storage *const storage = &prog->UniformStorage[id];

         mab.Uniforms[j] = id;
         if (!var->data.explicit_binding)
            var->data.binding = i;

         storage->atomic_buffer_index = i;
         storage->offset = var->data.atomic.offset;
         storage->array_stride = (var->type->is_array() ?
                                  var->type->element_type()->atomic_size() : 0);
      }

      /* Assign stage-specific fields. */
      for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j)
         mab.StageReferences[j] =
            (ab.stage_references[j] ? GL_TRUE : GL_FALSE);

      i++;
   }

   delete [] abs;
   assert(i == num_buffers);
}

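/*
 * Verify that the atomic counters and counter buffers used by the
 * program fit within the per-stage and combined limits advertised by
 * the context, reporting a linker error otherwise.
 */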
void
link_check_atomic_counter_resources(struct gl_context *ctx,
                                    struct gl_shader_program *prog)
{
   unsigned num_buffers;
   active_atomic_buffer *const abs =
      find_active_atomic_counters(ctx, prog, &num_buffers);
   unsigned atomic_counters[MESA_SHADER_STAGES] = {};
   unsigned atomic_buffers[MESA_SHADER_STAGES] = {};
   unsigned total_atomic_counters = 0;
   unsigned total_atomic_buffers = 0;

   /* Sum the required resources.  Note that this counts buffers and
    * counters referenced by several shader stages multiple times
    * against the combined limit; that's the behavior the spec
    * requires.
    */
   for (unsigned i = 0; i < ctx->Const.MaxAtomicBufferBindings; i++) {
      if (abs[i].size == 0)
         continue;

      for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j) {
         const unsigned n = abs[i].stage_references[j];

         if (n) {
            atomic_counters[j] += n;
            total_atomic_counters += n;
            atomic_buffers[j]++;
            total_atomic_buffers++;
         }
      }
   }

   /* Check that they are within the supported limits. */
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (atomic_counters[i] > ctx->Const.Program[i].MaxAtomicCounters)
         linker_error(prog, "Too many %s shader atomic counters",
                      _mesa_shader_stage_to_string(i));

      if (atomic_buffers[i] > ctx->Const.Program[i].MaxAtomicBuffers)
         linker_error(prog, "Too many %s shader atomic counter buffers",
                      _mesa_shader_stage_to_string(i));
   }

   if (total_atomic_counters > ctx->Const.MaxCombinedAtomicCounters)
      linker_error(prog, "Too many combined atomic counters");

   if (total_atomic_buffers > ctx->Const.MaxCombinedAtomicBuffers)
      linker_error(prog, "Too many combined atomic buffers");

   delete [] abs;
}