#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
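
/*
 * Worked check (editorial note): with CACHELINE_BYTES = 64 and 4-byte dwords,
 * CACHELINE_DWORDS evaluates to 64 / 4 = 16, so code that pads to a cacheline
 * boundary emits dwords in multiples of 16.
 */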

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
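
/*
 * Editorial note on the value: one reading of the quoted restriction is that
 * the ring must never be allowed to fill completely; always keeping at least
 * one 64-byte cacheline (16 dwords) free means head and tail cannot collide
 * in the same cacheline with head ahead of tail, and a full ring stays
 * distinguishable from an empty one (head == tail means empty).
 */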

struct intel_hw_status_page {
        u32 *page_addr;
        unsigned int gfx_addr;
        struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
        ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
        (i915_semaphore_seqno_size * (to)))

#define GEN8_WAIT_OFFSET(__ring, from) \
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
        ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
        (i915_semaphore_seqno_size * (__ring)->id))
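
/*
 * Worked check against the signal/wait tables further below (which assume
 * ids RCS=0, VCS=1, BCS=2, VECS=3, VCS2=4 and seqno_size = 8): the slot RCS
 * writes when signalling VCS is
 *   GEN8_SIGNAL_OFFSET(rcs, VCS) = base + (0 * 5 * 8) + (8 * 1) = base + 0x08
 * and VCS waits on exactly that slot:
 *   GEN8_WAIT_OFFSET(vcs, RCS)   = base + (0 * 5 * 8) + (8 * 1) = base + 0x08
 * matching the 0x08 entries in both tables.
 */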

#define GEN8_RING_SEMAPHORE_INIT do { \
        if (!dev_priv->semaphore_obj) { \
                break; \
        } \
        ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
        ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
        ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
        ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
        ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
        ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
        } while (0)
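
/*
 * Illustrative call site (a sketch, not the definitive init path): gen8
 * engine setup is expected to run this after dev_priv->semaphore_obj has
 * been allocated and pinned. The macro requires 'dev_priv' and 'ring' to be
 * in scope and falls through as a no-op when the semaphore object is absent:
 *
 *        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 *        GEN8_RING_SEMAPHORE_INIT;
 */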

enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
        HANGCHECK_ACTIVE,
        HANGCHECK_ACTIVE_LOOP,
        HANGCHECK_KICK,
        HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
        u64 acthd;
        u64 max_acthd;
        u32 seqno;
        int score;
        enum intel_ring_hangcheck_action action;
        int deadlock;
        u32 instdone[I915_NUM_INSTDONE_REG];
};

struct intel_ringbuffer {
        struct drm_i915_gem_object *obj;
        void __iomem *virtual_start;
        struct i915_vma *vma;

        struct intel_engine_cs *ring;
        struct list_head link;

        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;
        int reserved_size;
        int reserved_tail;
        bool reserved_in_use;

        /** We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
         * must have finished processing the request and so we know we
         * can advance the ringbuffer up to that position.
         *
         * last_retired_head is set to -1 after the value is consumed so
         * we can detect new retirements.
         */
        u32 last_retired_head;
};

struct intel_context;
struct drm_i915_reg_descriptor;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position; also helpful if we want
 *    to have multiple batches at different offsets based on some criteria.
 *    It is not a requirement at the moment but provides an option for
 *    future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
        struct i915_wa_ctx_bb {
                u32 offset;
                u32 size;
        } indirect_ctx, per_ctx;
        struct drm_i915_gem_object *obj;
};

struct intel_engine_cs {
        const char *name;
        enum intel_ring_id {
                RCS = 0,
                BCS,
                VCS,
                VCS2,   /* Keep instances of the same type engine together. */
                VECS
        } id;
#define I915_NUM_RINGS 5
#define _VCS(n) (VCS + (n))
        unsigned int exec_id;
        unsigned int guc_id;
        u32 mmio_base;
        struct drm_device *dev;
        struct intel_ringbuffer *buffer;
        struct list_head buffers;

        /*
         * A pool of objects to use as shadow copies of client batch buffers
         * when the command parser is enabled. Prevents the client from
         * modifying the batch contents after software parsing.
         */
        struct i915_gem_batch_pool batch_pool;

        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;

        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32 irq_enable_mask; /* bitmask to enable ring interrupt */
        struct drm_i915_gem_request *trace_irq_req;
        bool __must_check (*irq_get)(struct intel_engine_cs *ring);
        void (*irq_put)(struct intel_engine_cs *ring);

        int (*init_hw)(struct intel_engine_cs *ring);

        int (*init_context)(struct drm_i915_gem_request *req);

        void (*write_tail)(struct intel_engine_cs *ring,
                           u32 value);
        int __must_check (*flush)(struct drm_i915_gem_request *req,
                                  u32 invalidate_domains,
                                  u32 flush_domains);
        int (*add_request)(struct drm_i915_gem_request *req);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
        u32 (*get_seqno)(struct intel_engine_cs *ring,
                         bool lazy_coherency);
        void (*set_seqno)(struct intel_engine_cs *ring,
                          u32 seqno);
        int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
                                   u64 offset, u32 length,
                                   unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS 0x4
        void (*cleanup)(struct intel_engine_cs *ring);

        /* GEN8 signal/wait table - never trust comments!
         *        signal to    signal to    signal to    signal to    signal to
         *          RCS          VCS          BCS          VECS         VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90)  | VCS2 (0x98) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0)  |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
         *  ie. transpose of g(x, y)
         *
         *        sync from    sync from    sync from    sync from    sync from
         *          RCS          VCS          BCS          VECS         VCS2
         *      --------------------------------------------------------------------
         *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
         *      |-------------------------------------------------------------------
         *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
         *      |-------------------------------------------------------------------
         *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
         *      |-------------------------------------------------------------------
         * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90)  | VCS2 (0xb8) |
         *      |-------------------------------------------------------------------
         * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0)  |
         *      |-------------------------------------------------------------------
         *
         * Generalization:
         *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
         *  ie. transpose of f(x, y)
         */
        struct {
                u32 sync_seqno[I915_NUM_RINGS-1];

                union {
                        struct {
                                /* our mbox written by others */
                                u32 wait[I915_NUM_RINGS];
                                /* mboxes this ring signals to */
                                i915_reg_t signal[I915_NUM_RINGS];
                        } mbox;
                        u64 signal_ggtt[I915_NUM_RINGS];
                };

                /* AKA wait() */
                int (*sync_to)(struct drm_i915_gem_request *to_req,
                               struct intel_engine_cs *from,
                               u32 seqno);
                int (*signal)(struct drm_i915_gem_request *signaller_req,
                              /* num_dwords needed by caller */
                              unsigned int num_dwords);
        } semaphore;

        /* Execlists */
        spinlock_t execlist_lock;
        struct list_head execlist_queue;
        struct list_head execlist_retired_req_list;
        u8 next_context_status_buffer;
        bool disable_lite_restore_wa;
        u32 ctx_desc_template;
        u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int (*emit_request)(struct drm_i915_gem_request *request);
        int (*emit_flush)(struct drm_i915_gem_request *request,
                          u32 invalidate_domains,
                          u32 flush_domains);
        int (*emit_bb_start)(struct drm_i915_gem_request *req,
                             u64 offset, unsigned dispatch_flags);

        /**
         * List of objects currently involved in rendering from the
         * ringbuffer.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives. last_read_req
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head request_list;

        /**
         * Seqno of request most recently submitted to request_list.
         * Used exclusively by hang checker to avoid grabbing lock while
         * inspecting request list.
         */
        u32 last_submitted_seqno;

        bool gpu_caches_dirty;

        wait_queue_head_t irq_queue;

        struct intel_context *last_context;

        struct intel_ring_hangcheck hangcheck;

        struct {
                struct drm_i915_gem_object *obj;
                u32 gtt_offset;
                volatile u32 *cpu_page;
        } scratch;

        bool needs_cmd_parser;

        /*
         * Table of commands the command parser needs to know about
         * for this ring.
         */
        DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

        /*
         * Table of registers allowed in commands that read/write registers.
         */
        const struct drm_i915_reg_descriptor *reg_table;
        int reg_count;

        /*
         * Table of registers allowed in commands that read/write registers, but
         * only from the DRM master.
         */
        const struct drm_i915_reg_descriptor *master_reg_table;
        int master_reg_count;

        /*
         * Returns the bitmask for the length field of the specified command.
         * Return 0 for an unrecognized/invalid command.
         *
         * If the command parser finds an entry for a command in the ring's
         * cmd_tables, it gets the command's length based on the table entry.
         * If not, it calls this function to determine the per-ring length field
         * encoding for the command (i.e. certain opcode ranges use certain bits
         * to encode the command length in the header).
         */
        u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
        return ring->dev != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
        return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
                      struct intel_engine_cs *other)
{
        int idx;

        /*
         * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
         * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
         * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
         * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
         * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
         */

        idx = (other - ring) - 1;
        if (idx < 0)
                idx += I915_NUM_RINGS;

        return idx;
}
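
/*
 * Worked example (editorial): idx is the distance from 'ring' to 'other'
 * around the engine array, minus one, modulo I915_NUM_RINGS. Using the
 * layout assumed in the comment above (rcs one slot before vcs),
 * intel_ring_sync_index(vcs, rcs) = (0 - 1) - 1 = -2, which wraps to
 * -2 + 5 = 3, matching "vcs -> ... 3 = rcs". An engine paired with itself
 * would yield idx 4, a slot no caller requests.
 */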

static inline void
intel_flush_status_page(struct intel_engine_cs *ring, int reg)
{
        drm_clflush_virt_range(&ring->status_page.page_addr[reg],
                               sizeof(uint32_t));
}

static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
                       int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        barrier();
        return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_engine_cs *ring,
                        int reg, u32 value)
{
        ring->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX 0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX 0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
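
/*
 * Usage note (editorial sketch): these indices address dwords within the
 * status page, so the seqno most recently written by the GPU can be sampled
 * with intel_read_status_page(ring, I915_GEM_HWS_INDEX), while the *_ADDR
 * forms shift the dword index into the byte-address form expected by
 * MI_STORE_DWORD_INDEX.
 */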

struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
                                   u32 data)
{
        struct intel_ringbuffer *ringbuf = ring->buffer;
        iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
        ringbuf->tail += 4;
}
static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
                                       i915_reg_t reg)
{
        intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
        struct intel_ringbuffer *ringbuf = ring->buffer;
        ringbuf->tail &= ringbuf->size - 1;
}
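
/*
 * Typical emission sequence (illustrative sketch, assuming ring == req->ring
 * and with error handling trimmed):
 *
 *        ret = intel_ring_begin(req, 2);
 *        if (ret)
 *                return ret;
 *        intel_ring_emit(ring, MI_NOOP);
 *        intel_ring_emit(ring, MI_NOOP);
 *        intel_ring_advance(ring);
 *
 * intel_ring_begin() guarantees space for n dwords (wrapping the buffer if
 * necessary), intel_ring_emit() writes one dword at the tail, and
 * intel_ring_advance() masks the tail back into the ring's size; the new
 * tail is expected to reach the hardware only when the request is submitted
 * through write_tail().
 */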
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
        return ringbuf->tail;
}

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is ILK at 136 words. Reserving too much is better than reserving too little
 * as that allows for corner cases that might have been missed. So the figure
 * has been rounded up to 160 words.
 */
#define MIN_SPACE_FOR_ADD_REQUEST 160

/*
 * Reserve space in the ring to guarantee that the i915_add_request() call
 * will always have sufficient room to do its stuff. The request creation
 * code calls this automatically.
 */
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
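
/*
 * Expected call order (editorial sketch): reserve when the request is
 * created, then either cancel on abort or bracket the add-request emission
 * with use/end:
 *
 *        intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
 *        ... build and emit the body of the request ...
 *        intel_ring_reserved_space_use(ringbuf);
 *        ... i915_add_request() emits its commands ...
 *        intel_ring_reserved_space_end(ringbuf);
 */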

/* Legacy ringbuffer specific portion of reservation code: */
int intel_ring_reserve_space(struct drm_i915_gem_request *request);

#endif /* _INTEL_RINGBUFFER_H_ */