/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef VC4_CL_H
#define VC4_CL_H

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#include "util/u_math.h"
#include "util/macros.h"

#include "vc4_packet.h"

struct vc4_bo;

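/*
 * CPU-side buffer in which a VC4 command list (CL) is assembled before
 * submission.
 *
 * base points at the start of the buffer, next is the write cursor, and
 * size is the allocated capacity in bytes.  reloc_next is the byte offset
 * of the next GEM handle slot to be patched, and reloc_count is how many
 * relocation slots remain to be filled.
 */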
struct vc4_cl {
        void *base;
        void *next;
        uint32_t size;
        uint32_t reloc_next;
        uint32_t reloc_count;
};

void vc4_init_cl(struct vc4_context *vc4, struct vc4_cl *cl);
void vc4_reset_cl(struct vc4_cl *cl);
void vc4_dump_cl(void *cl, uint32_t size, bool is_render);
uint32_t vc4_gem_hindex(struct vc4_context *vc4, struct vc4_bo *bo);

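/*
 * Wrapper structs with the PACKED attribute (alignment 1), so that stores
 * made through them let the compiler generate code that is safe for
 * unaligned addresses instead of invoking undefined behavior.
 */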
struct PACKED unaligned_16 { uint16_t x; };
struct PACKED unaligned_32 { uint32_t x; };

static inline void
put_unaligned_32(void *ptr, uint32_t val)
{
        struct unaligned_32 *p = ptr;
        p->x = val;
}

static inline void
put_unaligned_16(void *ptr, uint16_t val)
{
        struct unaligned_16 *p = ptr;
        p->x = val;
}

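/*
 * Emit helpers: each asserts that the value fits in the remaining space
 * and then advances the write cursor past it.  The cl_aligned_* variants
 * store directly and may only be used when the cursor is known to be
 * suitably aligned.
 */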
static inline void
cl_u8(struct vc4_cl *cl, uint8_t n)
{
        assert((cl->next - cl->base) + 1 <= cl->size);

        *(uint8_t *)cl->next = n;
        cl->next++;
}

static inline void
cl_u16(struct vc4_cl *cl, uint16_t n)
{
        assert((cl->next - cl->base) + 2 <= cl->size);

        put_unaligned_16(cl->next, n);
        cl->next += 2;
}

static inline void
cl_u32(struct vc4_cl *cl, uint32_t n)
{
        assert((cl->next - cl->base) + 4 <= cl->size);

        put_unaligned_32(cl->next, n);
        cl->next += 4;
}

static inline void
cl_aligned_u32(struct vc4_cl *cl, uint32_t n)
{
        assert((cl->next - cl->base) + 4 <= cl->size);

        *(uint32_t *)cl->next = n;
        cl->next += 4;
}

static inline void
cl_ptr(struct vc4_cl *cl, void *ptr)
{
        assert((cl->next - cl->base) + sizeof(void *) <= cl->size);

        *(void **)cl->next = ptr;
        cl->next += sizeof(void *);
}

static inline void
cl_f(struct vc4_cl *cl, float f)
{
        cl_u32(cl, fui(f));
}

static inline void
cl_aligned_f(struct vc4_cl *cl, float f)
{
        cl_aligned_u32(cl, fui(f));
}

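/*
 * Relocations: cl_start_reloc() emits a VC4_PACKET_GEM_HANDLES packet
 * followed by zeroed handle slots, while cl_start_shader_reloc() only
 * reserves the slots.  A later cl_reloc_hindex() (or cl_reloc(), which
 * looks up the handle index itself) patches the next free slot with the
 * BO's handle index and then emits the offset into that BO at the current
 * write position.
 */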
static inline void
cl_start_reloc(struct vc4_cl *cl, uint32_t n)
{
        assert(n == 1 || n == 2);
        assert(cl->reloc_count == 0);
        cl->reloc_count = n;

        cl_u8(cl, VC4_PACKET_GEM_HANDLES);
        cl->reloc_next = cl->next - cl->base;
        cl_u32(cl, 0); /* Space where hindex will be written. */
        cl_u32(cl, 0); /* Space where hindex will be written. */
}

static inline void
cl_start_shader_reloc(struct vc4_cl *cl, uint32_t n)
{
        assert(cl->reloc_count == 0);
        cl->reloc_count = n;
        cl->reloc_next = cl->next - cl->base;

        /* Space where hindex will be written. */
        cl->next += n * 4;
}

static inline void
cl_reloc_hindex(struct vc4_cl *cl, uint32_t hindex, uint32_t offset)
{
        *(uint32_t *)(cl->base + cl->reloc_next) = hindex;
        cl->reloc_next += 4;

        cl->reloc_count--;

        cl_u32(cl, offset);
}

static inline void
cl_aligned_reloc_hindex(struct vc4_cl *cl, uint32_t hindex, uint32_t offset)
{
        *(uint32_t *)(cl->base + cl->reloc_next) = hindex;
        cl->reloc_next += 4;

        cl->reloc_count--;

        cl_aligned_u32(cl, offset);
}

static inline void
cl_reloc(struct vc4_context *vc4, struct vc4_cl *cl,
         struct vc4_bo *bo, uint32_t offset)
{
        cl_reloc_hindex(cl, vc4_gem_hindex(vc4, bo), offset);
}

static inline void
cl_aligned_reloc(struct vc4_context *vc4, struct vc4_cl *cl,
                 struct vc4_bo *bo, uint32_t offset)
{
        cl_aligned_reloc_hindex(cl, vc4_gem_hindex(vc4, bo), offset);
}

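/*
 * Illustrative usage sketch (not part of this header): emitting a packet
 * that references a buffer object.  The command-list field, packet name,
 * and field layout below are placeholders for the example; real callers
 * use the VC4_PACKET_* values and layouts from vc4_packet.h.
 *
 *     cl_start_reloc(&vc4->bcl, 1);
 *     cl_u8(&vc4->bcl, VC4_PACKET_EXAMPLE);       hypothetical opcode
 *     cl_reloc(vc4, &vc4->bcl, bo, byte_offset);  handle index + BO offset
 *     cl_u16(&vc4->bcl, some_flags);              remaining packet fields
 */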
void cl_ensure_space(struct vc4_cl *cl, uint32_t size);

#endif /* VC4_CL_H */