Subversion Repositories Kolibri OS

Rev

Rev 6937 | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2014 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  21.  * DEALINGS IN THE SOFTWARE.
  22.  */
  23.  
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_

/* Required GGTT alignment for a Gen8 logical ring context image */
#define GEN8_LR_CONTEXT_ALIGN 4096

/* Execlists regs — all offsets are relative to each engine's mmio_base */
#define RING_ELSP(ring)                         _MMIO((ring)->mmio_base + 0x230)
#define RING_EXECLIST_STATUS_LO(ring)           _MMIO((ring)->mmio_base + 0x234)
#define RING_EXECLIST_STATUS_HI(ring)           _MMIO((ring)->mmio_base + 0x234 + 4)
#define RING_CONTEXT_CONTROL(ring)              _MMIO((ring)->mmio_base + 0x244)
/* Bits within RING_CONTEXT_CONTROL */
#define   CTX_CTRL_INHIBIT_SYN_CTX_SWITCH       (1 << 3)
#define   CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT   (1 << 0)
#define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
/* Context-status buffer: per-entry LO/HI DWORD pairs, 8 bytes apart */
#define RING_CONTEXT_STATUS_BUF_LO(ring, i)     _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i)     _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring)           _MMIO((ring)->mmio_base + 0x3a0)
  40.  
/* The docs specify that the write pointer wraps around after 5h, "After status
 * is written out to the last available status QW at offset 5h, this pointer
 * wraps to 0."
 *
 * Therefore, one must infer that even though there are 3 bits available, 6 and
 * 7 appear to be reserved.
 */
#define GEN8_CSB_ENTRIES 6
#define GEN8_CSB_PTR_MASK 0x7
/* In RING_CONTEXT_STATUS_PTR: read pointer in bits 10:8, write in bits 2:0 */
#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
#define GEN8_CSB_WRITE_PTR(csb_status) \
        (((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
#define GEN8_CSB_READ_PTR(csb_status) \
        (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
  56.  
/* Logical Rings */
/* Per-request setup of execlists-specific state */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
/* Reserve ringbuffer space for a request before emitting commands */
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
/* Initialise all logical rings for the device (execlists mode) */
int intel_logical_rings_init(struct drm_device *dev);
/* Make room for num_dwords DWORDs of commands in the request's ringbuffer */
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);

int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
  66. /**
  67.  * intel_logical_ring_advance() - advance the ringbuffer tail
  68.  * @ringbuf: Ringbuffer to advance.
  69.  *
  70.  * The tail is only updated in our logical ringbuffer struct.
  71.  */
  72. static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
  73. {
  74.         ringbuf->tail &= ringbuf->size - 1;
  75. }
  76. /**
  77.  * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
  78.  * @ringbuf: Ringbuffer to write to.
  79.  * @data: DWORD to write.
  80.  */
  81. static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
  82.                                            u32 data)
  83. {
  84.         iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
  85.         ringbuf->tail += 4;
  86. }
  87. static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
  88.                                                i915_reg_t reg)
  89. {
  90.         intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
  91. }
  92.  
/* Logical Ring Contexts */

/* One extra page is added before LRC for GuC as shared data */
#define LRC_GUCSHR_PN   (0)              /* GuC shared-data page */
#define LRC_PPHWSP_PN   (LRC_GUCSHR_PN + 1)  /* per-process HW status page */
#define LRC_STATE_PN    (LRC_PPHWSP_PN + 1)  /* context state image */

void intel_lr_context_free(struct intel_context *ctx);
/* Size in bytes of the context image for the given engine */
uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
/* Allocate the per-engine backing objects for ctx on first use */
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                                    struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct intel_context *ctx,
                            struct intel_engine_cs *engine);
void intel_lr_context_reset(struct drm_device *dev,
                        struct intel_context *ctx);
/* 64-bit descriptor written to ELSP to submit this context */
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
                                     struct intel_engine_cs *ring);

/* Hardware-visible context ID for the (ctx, engine) pair */
u32 intel_execlists_ctx_id(struct intel_context *ctx,
                           struct intel_engine_cs *ring);
  113.  
/* Execlists */
/* Decide whether execlists should be enabled (module param vs. HW support) */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
struct i915_execbuffer_params;
/* Execbuffer submission path when running in execlists mode */
int intel_execlists_submission(struct i915_execbuffer_params *params,
                               struct drm_i915_gem_execbuffer2 *args,
                               struct list_head *vmas);

/* Context-switch interrupt handler: drains the context-status buffer */
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);

#endif /* _INTEL_LRC_H_ */
  125.