/*
 * Copyright 2013 Vadim Girlin <vadimgirlin@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Vadim Girlin
 */

#ifndef SB_SHADER_H_
#define SB_SHADER_H_

#include <list>
#include <string>
#include <map>
#include <vector>
#include <set>
#include <algorithm>

#include "sb_ir.h"
#include "sb_expr.h"

namespace r600_sb {

struct shader_input {
        unsigned comp_mask;
        unsigned preloaded;
};

struct error_info {
        node *n;
        unsigned arg_index;
        std::string message;
};

typedef std::multimap<node*, error_info> error_map;

class sb_context;

typedef std::vector<shader_input> inputs_vec;
typedef std::vector<gpr_array*> gpr_array_vec;

struct ra_edge {
        value *a, *b;
        unsigned cost;

        ra_edge(value *a, value *b, unsigned cost) : a(a), b(b), cost(cost) {}
};

enum chunk_flags {
        RCF_GLOBAL = (1 << 0),
        RCF_PIN_CHAN = (1 << 1),
        RCF_PIN_REG = (1 << 2),

        RCF_FIXED = (1 << 3),

        RCF_PREALLOC = (1 << 4)
};

inline chunk_flags operator |(chunk_flags l, chunk_flags r) {
        return (chunk_flags)((unsigned)l|(unsigned)r);
}
inline chunk_flags& operator |=(chunk_flags &l, chunk_flags r) {
        l = l | r;
        return l;
}

inline chunk_flags& operator &=(chunk_flags &l, chunk_flags r) {
        l = (chunk_flags)((unsigned)l & (unsigned)r);
        return l;
}

inline chunk_flags operator ~(chunk_flags r) {
        return (chunk_flags)~(unsigned)r;
}

struct ra_chunk {
        vvec values;
        chunk_flags flags;
        unsigned cost;
        sel_chan pin;

        ra_chunk() : values(), flags(), cost(), pin() {}

        bool is_fixed() { return flags & RCF_FIXED; }
        void fix() { flags |= RCF_FIXED; }

        bool is_global() { return flags & RCF_GLOBAL; }
        void set_global() { flags |= RCF_GLOBAL; }

        bool is_reg_pinned() { return flags & RCF_PIN_REG; }
        bool is_chan_pinned() { return flags & RCF_PIN_CHAN; }

        bool is_prealloc() { return flags & RCF_PREALLOC; }
        void set_prealloc() { flags |= RCF_PREALLOC; }
};

typedef std::vector<ra_chunk*> chunk_vector;

class ra_constraint {
public:
        ra_constraint(constraint_kind kind) : kind(kind), cost(0) {}

        constraint_kind kind;
        vvec values;
        unsigned cost;

        void update_values();
        bool check();
};

typedef std::vector<ra_constraint*> constraint_vec;
typedef std::vector<ra_chunk*> chunk_vec;

// priority queue
// FIXME use something more suitable or a custom class?

template <class T>
struct cost_compare {
        bool operator ()(const T& t1, const T& t2) {
                return t1->cost > t2->cost;
        }
};

template <class T, class Comp>
class queue {
        typedef std::vector<T> container;
        container cont;

public:
        queue() : cont() {}

        typedef typename container::iterator iterator;

        iterator begin() { return cont.begin(); }
        iterator end() { return cont.end(); }

        iterator insert(const T& t) {
                iterator I = std::upper_bound(begin(), end(), t, Comp());
                if (I == end()) {
                        // push_back may reallocate and invalidate I,
                        // so return a fresh iterator to the appended element
                        cont.push_back(t);
                        return cont.end() - 1;
                }
                // vector::insert returns a valid iterator to the inserted element
                return cont.insert(I, t);
        }

        void erase(const T& t) {
                std::pair<iterator, iterator> R =
                                std::equal_range(begin(), end(), t, Comp());
                iterator F = std::find(R.first, R.second, t);
                if (F != R.second)
                        cont.erase(F);
        }
};
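
/*
 * Usage sketch for the cost-ordered queue above: cost_compare compares the
 * pointed-to cost fields with '>', so queue keeps its underlying vector
 * sorted in descending cost order and iterating from begin() visits the
 * most expensive entry first.  This is the ordering used by the chunk,
 * edge and constraint queues defined below.  The "item" type here is
 * hypothetical and exists only for illustration; it is not part of this
 * header:
 *
 *   struct item { unsigned cost; };
 *
 *   void cost_queue_example() {
 *           item a = { 1 }, b = { 10000 }, c = { 5 };
 *           queue<item*, cost_compare<item*> > q;
 *           q.insert(&a);
 *           q.insert(&b);
 *           q.insert(&c);
 *           // iterating from q.begin() to q.end() yields costs 10000, 5, 1
 *   }
 */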

typedef queue<ra_chunk*, cost_compare<ra_chunk*> > chunk_queue;
typedef queue<ra_edge*, cost_compare<ra_edge*> > edge_queue;
typedef queue<ra_constraint*, cost_compare<ra_constraint*> > constraint_queue;

typedef std::set<ra_chunk*> chunk_set;

class shader;

class coalescer {

        shader &sh;

        edge_queue edges;
        chunk_queue chunks;
        constraint_queue constraints;

        constraint_vec all_constraints;
        chunk_vec all_chunks;

public:

        coalescer(shader &sh) : sh(sh), edges(), chunks(), constraints() {}
        ~coalescer();

        int run();

        void add_edge(value *a, value *b, unsigned cost);
        void build_chunks();
        void build_constraint_queue();
        void build_chunk_queue();
        int color_constraints();
        void color_chunks();

        ra_constraint* create_constraint(constraint_kind kind);

        enum ac_cost {
                phi_cost = 10000,
                copy_cost = 1,
        };

        void dump_edges();
        void dump_chunks();
        void dump_constraint_queue();

        static void dump_chunk(ra_chunk *c);
        static void dump_constraint(ra_constraint* c);

        void get_chunk_interferences(ra_chunk *c, val_set &s);

private:

        void create_chunk(value *v);
        void unify_chunks(ra_edge *e);
        bool chunks_interference(ra_chunk *c1, ra_chunk *c2);

        int color_reg_constraint(ra_constraint *c);
        void color_phi_constraint(ra_constraint *c);

        void init_reg_bitset(sb_bitset &bs, val_set &vs);

        void color_chunk(ra_chunk *c, sel_chan color);

        ra_chunk* detach_value(value *v);
};


class shader {

        sb_context &ctx;

        typedef sb_map<uint32_t, value*> value_map;
        value_map reg_values;

        // read-only values
        value_map const_values;      // immediate constants, keyed by the constant value (uint32_t)
        value_map special_ro_values; // keyed by hw alu_sel & chan
        value_map kcache_values;

        gpr_array_vec gpr_arrays;

        unsigned next_temp_value_index;

        unsigned prep_regs_count;

        value* pred_sels[2];

        regions_vec regions;
        inputs_vec inputs;

        value *undef;

        sb_value_pool val_pool;
        sb_pool pool;

        std::vector<node*> all_nodes;

public:
        shader_stats src_stats, opt_stats;

        error_map errors;

        bool optimized;

        unsigned id;

        coalescer coal;

        static const unsigned temp_regid_offset = 512;

        bbs_vec bbs;

        const shader_target target;

        value_table vt;
        expr_handler ex;

        container_node *root;

        bool compute_interferences;

        bool has_alu_predication;
        bool uses_gradients;

        bool safe_math;

        unsigned ngpr, nstack;

        shader(sb_context &sctx, shader_target t, unsigned id);

        ~shader();

        sb_context &get_ctx() const { return ctx; }

        value* get_const_value(const literal & v);
        value* get_special_value(unsigned sv_id, unsigned version = 0);
        value* create_temp_value();
        value* get_gpr_value(bool src, unsigned reg, unsigned chan, bool rel,
                             unsigned version = 0);

        value* get_special_ro_value(unsigned sel);
        value* get_kcache_value(unsigned bank, unsigned index, unsigned chan);

        value* get_value_version(value* v, unsigned ver);

        void init();
        void add_pinned_gpr_values(vvec& vec, unsigned gpr, unsigned comp_mask, bool src);

        void dump_ir();

        void add_gpr_array(unsigned gpr_start, unsigned gpr_count,
                           unsigned comp_mask);

        value* get_pred_sel(int sel);
        bool assign_slot(alu_node *n, alu_node *slots[5]);

        gpr_array* get_gpr_array(unsigned reg, unsigned chan);

        void add_input(unsigned gpr, bool preloaded = false,
                       unsigned comp_mask = 0xF);

        const inputs_vec & get_inputs() { return inputs; }

        regions_vec & get_regions() { return regions; }

        void init_call_fs(cf_node *cf);

        value *get_undef_value();
        void set_undef(val_set &s);

        node* create_node(node_type nt, node_subtype nst,
                          node_flags flags = NF_EMPTY);
        alu_node* create_alu();
        alu_group_node* create_alu_group();
        alu_packed_node* create_alu_packed();
        cf_node* create_cf();
        cf_node* create_cf(unsigned op);
        fetch_node* create_fetch();
        region_node* create_region();
        depart_node* create_depart(region_node *target);
        repeat_node* create_repeat(region_node *target);
        container_node* create_container(node_type nt = NT_LIST,
                                         node_subtype nst = NST_LIST,
                                         node_flags flags = NF_EMPTY);
        if_node* create_if();
        bb_node* create_bb(unsigned id, unsigned loop_level);

        value* get_value_by_uid(unsigned id) { return val_pool[id - 1]; }

        cf_node* create_clause(node_subtype nst);

        void create_bbs();
        void expand_bbs();

        alu_node* create_mov(value* dst, value* src);
        alu_node* create_copy_mov(value *dst, value *src, unsigned affcost = 1);

        const char * get_shader_target_name();

        std::string get_full_target_name();

        void create_bbs(container_node* n, bbs_vec &bbs, int loop_level = 0);
        void expand_bbs(bbs_vec &bbs);

        sched_queue_id get_queue_id(node* n);

        void simplify_dep_rep(node *dr);

        unsigned first_temp_gpr();
        unsigned num_nontemp_gpr();

        gpr_array_vec& arrays() { return gpr_arrays; }

        void set_uses_kill();

        void fill_array_values(gpr_array *a, vvec &vv);

        alu_node* clone(alu_node *n);

        sb_value_pool& get_value_pool() { return val_pool; }

        void collect_stats(bool opt);

private:
        value* create_value(value_kind k, sel_chan regid, unsigned ver);
        value* get_value(value_kind kind, sel_chan id,
                         unsigned version = 0);
        value* get_ro_value(value_map &vm, value_kind vk, unsigned key);
};

}

#endif /* SB_SHADER_H_ */