#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H

#include "main/mtypes.h"

#include "intel_context.h"
#include "intel_bufmgr.h"
#include "intel_reg.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Number of bytes to reserve for commands necessary to complete a batch.
 *
 * This includes:
 * - MI_BATCHBUFFER_END (4 bytes)
 * - Optional MI_NOOP for ensuring the batch length is qword aligned (4 bytes)
 * - Any state emitted by vtbl->finish_batch():
 *   - Gen4-5 record ending occlusion query values (4 * 4 = 16 bytes)
 */
#define BATCH_RESERVED 24
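
/* Worked example of the accounting above (added commentary): on Gen4-5 the
 * worst-case tail is
 *
 *    4 (MI_BATCHBUFFER_END) + 4 (MI_NOOP pad) + 16 (occlusion values) = 24
 *
 * which is exactly why BATCH_RESERVED is 24.
 */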

struct intel_batchbuffer;

void intel_batchbuffer_init(struct intel_context *intel);
void intel_batchbuffer_free(struct intel_context *intel);

int _intel_batchbuffer_flush(struct intel_context *intel,
                             const char *file, int line);

#define intel_batchbuffer_flush(intel) \
        _intel_batchbuffer_flush(intel, __FILE__, __LINE__)
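
/* Usage sketch (added commentary, not part of the original header): because
 * the macro captures __FILE__ and __LINE__ at its call site, a failed flush
 * can be reported against the caller, assuming the usual 0-on-success
 * convention for the int return value. A call such as
 *
 *    if (intel_batchbuffer_flush(intel) != 0)
 *       fprintf(stderr, "batch flush failed\n");
 *
 * in foo.c line 123 expands to _intel_batchbuffer_flush(intel, "foo.c", 123).
 */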


/* Unlike bmBufferData, this currently requires the buffer be mapped.
 * Consider it a convenience function wrapping multiple
 * intel_batchbuffer_emit_dword() calls.
 */
void intel_batchbuffer_data(struct intel_context *intel,
                            const void *data, GLuint bytes);
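
/* Illustrative call (added example, not original code), assuming MI_FLUSH
 * and MI_NOOP from intel_reg.h: copy a pre-packed block of dwords in one
 * shot instead of issuing per-dword emits:
 *
 *    uint32_t cmds[2] = { MI_FLUSH, MI_NOOP };
 *    intel_batchbuffer_data(intel, cmds, sizeof(cmds));
 */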

bool intel_batchbuffer_emit_reloc(struct intel_context *intel,
                                  drm_intel_bo *buffer,
                                  uint32_t read_domains,
                                  uint32_t write_domain,
                                  uint32_t offset);
bool intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                         drm_intel_bo *buffer,
                                         uint32_t read_domains,
                                         uint32_t write_domain,
                                         uint32_t offset);
void intel_batchbuffer_emit_mi_flush(struct intel_context *intel);
/* Declared here so the CACHED_BATCH() macro below has a prototype. */
void intel_batchbuffer_cached_advance(struct intel_context *intel);

static inline uint32_t float_as_int(float f)
{
   union {
      float f;
      uint32_t d;
   } fi;

   fi.f = f;
   return fi.d;
}
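
/* Note (added commentary): the union above is the portable way to reuse a
 * float's bit pattern as a dword; a pointer cast like *(uint32_t *)&f would
 * break C strict-aliasing rules, whereas union type punning is explicitly
 * supported by GCC. For example, float_as_int(1.0f) yields 0x3f800000.
 */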

/* Inline functions - might actually be better off with these
 * non-inlined.  Certainly better off switching all command packets to
 * be passed as structs rather than dwords, but that's a little bit of
 * work...
 */
static inline unsigned
intel_batchbuffer_space(struct intel_context *intel)
{
   return (intel->batch.bo->size - intel->batch.reserved_space)
      - intel->batch.used*4;
}


static inline void
intel_batchbuffer_emit_dword(struct intel_context *intel, GLuint dword)
{
#ifdef DEBUG
   assert(intel_batchbuffer_space(intel) >= 4);
#endif
   intel->batch.map[intel->batch.used++] = dword;
}

static inline void
intel_batchbuffer_emit_float(struct intel_context *intel, float f)
{
   intel_batchbuffer_emit_dword(intel, float_as_int(f));
}

static inline void
intel_batchbuffer_require_space(struct intel_context *intel,
                                GLuint sz)
{
#ifdef DEBUG
   assert(sz < intel->maxBatchSize - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(intel) < sz)
      intel_batchbuffer_flush(intel);
}
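
/* Sketch of the intended calling pattern (added example, not original code):
 * reserve room for a whole packet up front so a flush can never split a
 * command in the middle, e.g. for a two-dword packet:
 *
 *    intel_batchbuffer_require_space(intel, 2 * 4);
 *    intel_batchbuffer_emit_dword(intel, MI_FLUSH);
 *    intel_batchbuffer_emit_dword(intel, MI_NOOP);
 *
 * intel_batchbuffer_begin() below wraps exactly this reservation step.
 */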

static inline void
intel_batchbuffer_begin(struct intel_context *intel, int n)
{
   intel_batchbuffer_require_space(intel, n * 4);

   intel->batch.emit = intel->batch.used;
#ifdef DEBUG
   intel->batch.total = n;
#endif
}

static inline void
intel_batchbuffer_advance(struct intel_context *intel)
{
#ifdef DEBUG
   struct intel_batchbuffer *batch = &intel->batch;
   unsigned int _n = batch->used - batch->emit;
   assert(batch->total != 0);
   if (_n != batch->total) {
      fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
              _n, batch->total);
      abort();
   }
   batch->total = 0;
#endif
}

/* Here are the crusty old macros, to be removed:
 */
#define BATCH_LOCALS

#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel, n)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel, d)
#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel, f)
#define OUT_RELOC(buf, read_domains, write_domain, delta) do {          \
   intel_batchbuffer_emit_reloc(intel, buf,                             \
                                read_domains, write_domain, delta);     \
} while (0)
#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do {   \
   intel_batchbuffer_emit_reloc_fenced(intel, buf,                      \
                                       read_domains, write_domain, delta); \
} while (0)

#define ADVANCE_BATCH() intel_batchbuffer_advance(intel);
#define CACHED_BATCH() intel_batchbuffer_cached_advance(intel);
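
/* Illustrative use of the macros above (added example, not original code).
 * They assume a local variable named `intel` is in scope, and DEBUG builds
 * verify via ADVANCE_BATCH() that BEGIN_BATCH(n) was followed by exactly n
 * emitted dwords. The buffer object `bo` and the usual libdrm read domain
 * I915_GEM_DOMAIN_RENDER are assumptions here:
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(MI_FLUSH);
 *    OUT_BATCH(MI_NOOP);
 *    OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, 0, 0);
 *    ADVANCE_BATCH();
 */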

#ifdef __cplusplus
}
#endif

#endif