/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "util/u_memory.h"
#include "radeonsi_pipe.h"
#include "radeonsi_pm4.h"
#include "sid.h"
#include "r600_hw_context_priv.h"

#define NUMBER_OF_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))

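/*
 * Low-level PM4 packet building: si_pm4_cmd_begin() reserves a dword for the
 * PKT3 header, si_pm4_cmd_add() appends payload dwords and si_pm4_cmd_end()
 * patches the reserved header with the final payload count.
 */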
void si_pm4_cmd_begin(struct si_pm4_state *state, unsigned opcode)
{
        state->last_opcode = opcode;
        state->last_pm4 = state->ndw++;
}

void si_pm4_cmd_add(struct si_pm4_state *state, uint32_t dw)
{
        state->pm4[state->ndw++] = dw;
}

void si_pm4_cmd_end(struct si_pm4_state *state, bool predicate)
{
        unsigned count;
        count = state->ndw - state->last_pm4 - 2;
        state->pm4[state->last_pm4] =
                PKT3(state->last_opcode, count, predicate)
                   | PKT3_SHADER_TYPE_S(state->compute_pkt);

        assert(state->ndw <= SI_PM4_MAX_DW);
}

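/*
 * Queue a register write. The register offset selects the SET_*_REG packet
 * type; writes to consecutive registers with the same packet type are merged
 * into the previous packet.
 */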
void si_pm4_set_reg(struct si_pm4_state *state, unsigned reg, uint32_t val)
{
        unsigned opcode;

        if (reg >= SI_CONFIG_REG_OFFSET && reg < SI_CONFIG_REG_END) {
                opcode = PKT3_SET_CONFIG_REG;
                reg -= SI_CONFIG_REG_OFFSET;

        } else if (reg >= SI_SH_REG_OFFSET && reg < SI_SH_REG_END) {
                opcode = PKT3_SET_SH_REG;
                reg -= SI_SH_REG_OFFSET;

        } else if (reg >= SI_CONTEXT_REG_OFFSET && reg < SI_CONTEXT_REG_END) {
                opcode = PKT3_SET_CONTEXT_REG;
                reg -= SI_CONTEXT_REG_OFFSET;

        } else if (reg >= CIK_UCONFIG_REG_OFFSET && reg < CIK_UCONFIG_REG_END) {
                opcode = PKT3_SET_UCONFIG_REG;
                reg -= CIK_UCONFIG_REG_OFFSET;

        } else {
                R600_ERR("Invalid register offset %08x!\n", reg);
                return;
        }

        reg >>= 2;

        if (opcode != state->last_opcode || reg != (state->last_reg + 1)) {
                si_pm4_cmd_begin(state, opcode);
                si_pm4_cmd_add(state, reg);
        }

        state->last_reg = reg;
        si_pm4_cmd_add(state, val);
        si_pm4_cmd_end(state, false);
}

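/* Remember a buffer object used by this state, so that a relocation for it
 * can be added when the state is emitted. */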
void si_pm4_add_bo(struct si_pm4_state *state,
                   struct si_resource *bo,
                   enum radeon_bo_usage usage)
{
        unsigned idx = state->nbo++;
        assert(idx < SI_PM4_MAX_BO);

        si_resource_reference(&state->bo[idx], bo);
        state->bo_usage[idx] = usage;
}

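/*
 * si_pm4_sh_data_begin/add/end store a block of data inside the PM4 buffer
 * (wrapped in a NOP packet) and then emit a SET_SH_REG_OFFSET packet pointing
 * at it; the offset dword is recorded in state->relocs so si_pm4_emit() can
 * patch it with the data's final position in the command stream.
 */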
void si_pm4_sh_data_begin(struct si_pm4_state *state)
{
        si_pm4_cmd_begin(state, PKT3_NOP);
}

void si_pm4_sh_data_add(struct si_pm4_state *state, uint32_t dw)
{
        si_pm4_cmd_add(state, dw);
}

void si_pm4_sh_data_end(struct si_pm4_state *state, unsigned base, unsigned idx)
{
        unsigned offs = state->last_pm4 + 1;
        unsigned reg = base + idx * 4;

        /* Bail if no data was added */
        if (state->ndw == offs) {
                state->ndw--;
                return;
        }

        si_pm4_cmd_end(state, false);

        si_pm4_cmd_begin(state, PKT3_SET_SH_REG_OFFSET);
        si_pm4_cmd_add(state, (reg - SI_SH_REG_OFFSET) >> 2);
        state->relocs[state->nrelocs++] = state->ndw;
        si_pm4_cmd_add(state, offs << 2);
        si_pm4_cmd_add(state, 0);
        si_pm4_cmd_end(state, false);
}

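/* The si_pm4_inval_*_cache() helpers accumulate cache invalidation requests
 * in the state's cp_coher_cntl mask; si_pm4_sync_flags() collects these bits
 * for all dirty states. */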
void si_pm4_inval_shader_cache(struct si_pm4_state *state)
{
        state->cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
        state->cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
}

void si_pm4_inval_texture_cache(struct si_pm4_state *state)
{
        state->cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
        state->cp_coher_cntl |= S_0085F0_TCL1_ACTION_ENA(1);
}

void si_pm4_inval_fb_cache(struct si_pm4_state *state, unsigned nr_cbufs)
{
        state->cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1);
        state->cp_coher_cntl |= ((1 << nr_cbufs) - 1) << S_0085F0_CB0_DEST_BASE_ENA_SHIFT;
}

void si_pm4_inval_zsbuf_cache(struct si_pm4_state *state)
{
        state->cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);
}

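/* Release a state: clear its slot in the emitted array if still referenced
 * there, drop its buffer references and free the structure. */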
void si_pm4_free_state(struct r600_context *rctx,
                       struct si_pm4_state *state,
                       unsigned idx)
{
        if (state == NULL)
                return;

        if (idx != ~0 && rctx->emitted.array[idx] == state) {
                rctx->emitted.array[idx] = NULL;
        }

        for (int i = 0; i < state->nbo; ++i) {
                si_resource_reference(&state->bo[i], NULL);
        }
        FREE(state);
}

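/* Allocate a zero-initialized PM4 state for the given context. */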
struct si_pm4_state * si_pm4_alloc_state(struct r600_context *rctx)
{
        struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);

        if (pm4 == NULL)
                return NULL;

        pm4->chip_class = rctx->chip_class;

        return pm4;
}

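/* OR together the cp_coher_cntl bits of all queued states that have not been
 * emitted yet. */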
uint32_t si_pm4_sync_flags(struct r600_context *rctx)
{
        uint32_t cp_coher_cntl = 0;

        for (int i = 0; i < NUMBER_OF_STATES; ++i) {
                struct si_pm4_state *state = rctx->queued.array[i];

                if (!state || rctx->emitted.array[i] == state)
                        continue;

                cp_coher_cntl |= state->cp_coher_cntl;
        }
        return cp_coher_cntl;
}

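/* Return the number of dwords needed to emit all queued but not yet emitted
 * states. */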
unsigned si_pm4_dirty_dw(struct r600_context *rctx)
{
        unsigned count = 0;

        for (int i = 0; i < NUMBER_OF_STATES; ++i) {
                struct si_pm4_state *state = rctx->queued.array[i];

                if (!state || rctx->emitted.array[i] == state)
                        continue;

                count += state->ndw;
#if R600_TRACE_CS
                /* for tracing each state */
                if (rctx->screen->trace_bo) {
                        count += R600_TRACE_CS_DWORDS;
                }
#endif
        }

        return count;
}

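/* Emit a single state: add its buffer relocations, copy its packets into the
 * command stream and patch the recorded SH data offsets to their final
 * command-stream positions. */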
void si_pm4_emit(struct r600_context *rctx, struct si_pm4_state *state)
{
        struct radeon_winsys_cs *cs = rctx->cs;
        for (int i = 0; i < state->nbo; ++i) {
                r600_context_bo_reloc(rctx, state->bo[i],
                                      state->bo_usage[i]);
        }

        memcpy(&cs->buf[cs->cdw], state->pm4, state->ndw * 4);

        for (int i = 0; i < state->nrelocs; ++i) {
                cs->buf[cs->cdw + state->relocs[i]] += cs->cdw << 2;
        }

        cs->cdw += state->ndw;

#if R600_TRACE_CS
        if (rctx->screen->trace_bo) {
                r600_trace_emit(rctx);
        }
#endif
}

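/* Emit every queued state that differs from what was last emitted and mark
 * it as emitted. */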
void si_pm4_emit_dirty(struct r600_context *rctx)
{
        for (int i = 0; i < NUMBER_OF_STATES; ++i) {
                struct si_pm4_state *state = rctx->queued.array[i];

                if (!state || rctx->emitted.array[i] == state)
                        continue;

                si_pm4_emit(rctx, state);
                rctx->emitted.array[i] = state;
        }
}

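/* Forget what has been emitted, so that all queued states are emitted again
 * by the next si_pm4_emit_dirty() call. */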
void si_pm4_reset_emitted(struct r600_context *rctx)
{
        memset(&rctx->emitted, 0, sizeof(rctx->emitted));
}