Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Last modification | View Log | RSS feed

  1. #ifndef INTEL_BATCHBUFFER_H
  2. #define INTEL_BATCHBUFFER_H
  3.  
  4. #include "main/mtypes.h"
  5.  
  6. #include "brw_context.h"
  7. #include "intel_bufmgr.h"
  8. #include "intel_reg.h"
  9.  
  10. #ifdef __cplusplus
  11. extern "C" {
  12. #endif
  13.  
  14. /**
  15.  * Number of bytes to reserve for commands necessary to complete a batch.
  16.  *
  17.  * This includes:
  18.  * - MI_BATCHBUFFER_END (4 bytes)
  19.  * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
  20.  * - Any state emitted by vtbl->finish_batch():
  21.  *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
  22.  */
  23. #define BATCH_RESERVED 24
  24.  
  25. struct intel_batchbuffer;
  26.  
  27. void intel_batchbuffer_init(struct brw_context *brw);
  28. void intel_batchbuffer_free(struct brw_context *brw);
  29. void intel_batchbuffer_save_state(struct brw_context *brw);
  30. void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
  31.  
  32. int _intel_batchbuffer_flush(struct brw_context *brw,
  33.                              const char *file, int line);
  34.  
  35. #define intel_batchbuffer_flush(intel) \
  36.         _intel_batchbuffer_flush(intel, __FILE__, __LINE__)
  37.  
  38.  
  39.  
/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_buffer_dword() calls.
 */
void intel_batchbuffer_data(struct brw_context *brw,
                            const void *data, GLuint bytes, bool is_blit);

/* Emit a relocation for `buffer` into the batch at the current position.
 * read_domains/write_domain are GEM domain flags; offset is the delta
 * added to the buffer's presumed address.
 * NOTE(review): the bool return presumably signals success/failure —
 * confirm against the definition in intel_batchbuffer.c.
 */
bool intel_batchbuffer_emit_reloc(struct brw_context *brw,
                                       drm_intel_bo *buffer,
                                       uint32_t read_domains,
                                       uint32_t write_domain,
                                       uint32_t offset);
/* Fenced variant of intel_batchbuffer_emit_reloc (for objects that need a
 * fence register, e.g. tiled buffers on older hardware — TODO confirm). */
bool intel_batchbuffer_emit_reloc_fenced(struct brw_context *brw,
                                              drm_intel_bo *buffer,
                                              uint32_t read_domains,
                                              uint32_t write_domain,
                                              uint32_t offset);
/* Pipeline-flush helpers; see definitions for the exact PIPE_CONTROL /
 * MI_FLUSH sequences each one emits. */
void intel_batchbuffer_emit_mi_flush(struct brw_context *brw);
void intel_emit_post_sync_nonzero_flush(struct brw_context *brw);
void intel_emit_depth_stall_flushes(struct brw_context *brw);
void gen7_emit_vs_workaround_flush(struct brw_context *brw);
  61.  
  62. static INLINE uint32_t float_as_int(float f)
  63. {
  64.    union {
  65.       float f;
  66.       uint32_t d;
  67.    } fi;
  68.  
  69.    fi.f = f;
  70.    return fi.d;
  71. }
  72.  
  73. /* Inline functions - might actually be better off with these
  74.  * non-inlined.  Certainly better off switching all command packets to
  75.  * be passed as structs rather than dwords, but that's a little bit of
  76.  * work...
  77.  */
  78. static INLINE unsigned
  79. intel_batchbuffer_space(struct brw_context *brw)
  80. {
  81.    return (brw->batch.state_batch_offset - brw->batch.reserved_space)
  82.       - brw->batch.used*4;
  83. }
  84.  
  85.  
  86. static INLINE void
  87. intel_batchbuffer_emit_dword(struct brw_context *brw, GLuint dword)
  88. {
  89. #ifdef DEBUG
  90.    assert(intel_batchbuffer_space(brw) >= 4);
  91. #endif
  92.    brw->batch.map[brw->batch.used++] = dword;
  93. }
  94.  
  95. static INLINE void
  96. intel_batchbuffer_emit_float(struct brw_context *brw, float f)
  97. {
  98.    intel_batchbuffer_emit_dword(brw, float_as_int(f));
  99. }
  100.  
  101. static INLINE void
  102. intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz, int is_blit)
  103. {
  104.    if (brw->gen >= 6 &&
  105.        brw->batch.is_blit != is_blit && brw->batch.used) {
  106.       intel_batchbuffer_flush(brw);
  107.    }
  108.  
  109.    brw->batch.is_blit = is_blit;
  110.  
  111. #ifdef DEBUG
  112.    assert(sz < BATCH_SZ - BATCH_RESERVED);
  113. #endif
  114.    if (intel_batchbuffer_space(brw) < sz)
  115.       intel_batchbuffer_flush(brw);
  116. }
  117.  
  118. static INLINE void
  119. intel_batchbuffer_begin(struct brw_context *brw, int n, bool is_blit)
  120. {
  121.    intel_batchbuffer_require_space(brw, n * 4, is_blit);
  122.  
  123.    brw->batch.emit = brw->batch.used;
  124. #ifdef DEBUG
  125.    brw->batch.total = n;
  126. #endif
  127. }
  128.  
  129. static INLINE void
  130. intel_batchbuffer_advance(struct brw_context *brw)
  131. {
  132. #ifdef DEBUG
  133.    struct intel_batchbuffer *batch = &brw->batch;
  134.    unsigned int _n = batch->used - batch->emit;
  135.    assert(batch->total != 0);
  136.    if (_n != batch->total) {
  137.       fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
  138.               _n, batch->total);
  139.       abort();
  140.    }
  141.    batch->total = 0;
  142. #endif
  143. }
  144.  
  145. void intel_batchbuffer_cached_advance(struct brw_context *brw);
  146.  
/* Here are the crusty old macros, to be removed:
 * All of them assume a local (or parameter) named `brw` is in scope.
 */
#define BATCH_LOCALS

/* Open a render-ring (BEGIN_BATCH) or blit-ring (BEGIN_BATCH_BLT) packet
 * of n dwords; must be balanced by ADVANCE_BATCH(). */
#define BEGIN_BATCH(n) intel_batchbuffer_begin(brw, n, false)
#define BEGIN_BATCH_BLT(n) intel_batchbuffer_begin(brw, n, true)
/* Emit one dword, or one float bit-cast to a dword. */
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(brw, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(brw, f)
/* Emit a relocated buffer address (optionally fenced) at the current
 * batch position; `delta` is added to the buffer's address. */
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {          \
   intel_batchbuffer_emit_reloc(brw, buf,                       \
                                read_domains, write_domain, delta);     \
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {   \
   intel_batchbuffer_emit_reloc_fenced(brw, buf,                \
                                       read_domains, write_domain, delta); \
} while (0)

/* NOTE(review): these two expand with a trailing ';', so the idiomatic
 * "ADVANCE_BATCH();" call site yields a double semicolon, and use inside
 * an unbraced if/else would misparse.  Left as-is since existing callers
 * may rely on the semicolon. */
#define ADVANCE_BATCH() intel_batchbuffer_advance(brw);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(brw);
  166.  
  167. #ifdef __cplusplus
  168. }
  169. #endif
  170.  
  171. #endif
  172.