Diff: intel_ringbuffer.h, Rev 5354 → Rev 6084
@@ -1 +1 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_

 #include <linux/hashtable.h>
+#include "i915_gem_batch_pool.h"

 #define I915_CMD_HASH_ORDER 9

 /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
  * but keeps the logic simple. Indeed, the whole purpose of this macro is just
  * to give some inclination as to some of the magic values used in the various
  * workarounds!
  */
 #define CACHELINE_BYTES 64
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
@@ -93 +95 @@
 	int deadlock;
 };

 struct intel_ringbuffer {
 	struct drm_i915_gem_object *obj;
 	void __iomem *virtual_start;

 	struct intel_engine_cs *ring;

-	/*
-	 * FIXME: This backpointer is an artifact of the history of how the
-	 * execlist patches came into being. It will get removed once the basic
-	 * code has landed.
-	 */
-	struct intel_context *FIXME_lrc_ctx;
-
 	u32 head;
 	u32 tail;
 	int space;
 	int size;
 	int effective_size;
+	int reserved_size;
+	int reserved_tail;
+	bool reserved_in_use;

 	/** We track the position of the requests in the ring buffer, and
 	 * when each is retired we increment last_retired_head as the GPU
 	 * must have finished processing the request and so we know we
 	 * can advance the ringbuffer up to that position.
 	 *
 	 * last_retired_head is set to -1 after the value is consumed so
 	 * we can detect new retirements.
 	 */
 	u32 last_retired_head;
 };

+struct intel_context;
+struct drm_i915_reg_descriptor;
+
+/*
+ * we use a single page to load ctx workarounds so all of these
+ * values are referred in terms of dwords
+ *
+ * struct i915_wa_ctx_bb:
+ *  offset: specifies batch starting position, also helpful in case
+ *    if we want to have multiple batches at different offsets based on
+ *    some criteria. It is not a requirement at the moment but provides
+ *    an option for future use.
+ *  size: size of the batch in DWORDS
+ */
+struct i915_ctx_workarounds {
+	struct i915_wa_ctx_bb {
+		u32 offset;
+		u32 size;
+	} indirect_ctx, per_ctx;
+	struct drm_i915_gem_object *obj;
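A note on the head/tail/size/space fields above: free space wraps around the buffer, and the new reserved_* fields carve a chunk out of it. Below is a minimal sketch of the wrap-around computation behind the __intel_ring_space() prototype declared later in this file; the slack constant is a stand-in named for this example only.

static int example_ring_space(int head, int tail, int size)
{
	/* Sketch only: the driver keeps some slack so that head == tail
	 * always means "empty" rather than "completely full";
	 * EXAMPLE_RING_FREE_SPACE (64 here) is illustrative. */
	enum { EXAMPLE_RING_FREE_SPACE = 64 };
	int space = head - tail;	/* free bytes between tail and head */

	if (space <= 0)			/* tail has wrapped past head */
		space += size;

	return space - EXAMPLE_RING_FREE_SPACE;
}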
@@ -136 +156 @@
 #define LAST_USER_RING (VECS + 1)
 	u32		mmio_base;
 	struct		drm_device *dev;
 	struct intel_ringbuffer *buffer;

+	/*
+	 * A pool of objects to use as shadow copies of client batch buffers
+	 * when the command parser is enabled. Prevents the client from
+	 * modifying the batch contents after software parsing.
+	 */
+	struct i915_gem_batch_pool batch_pool;
+
 	struct intel_hw_status_page status_page;
+	struct i915_ctx_workarounds wa_ctx;

 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
-	u32		trace_irq_seqno;
+	struct drm_i915_gem_request *trace_irq_req;
 	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
 	void		(*irq_put)(struct intel_engine_cs *ring);

-	int		(*init)(struct intel_engine_cs *ring);
+	int		(*init_hw)(struct intel_engine_cs *ring);

-	int		(*init_context)(struct intel_engine_cs *ring,
-					struct intel_context *ctx);
+	int		(*init_context)(struct drm_i915_gem_request *req);

 	void		(*write_tail)(struct intel_engine_cs *ring,
 				      u32 value);
-	int __must_check (*flush)(struct intel_engine_cs *ring,
+	int __must_check (*flush)(struct drm_i915_gem_request *req,
 				  u32	invalidate_domains,
 				  u32	flush_domains);
-	int		(*add_request)(struct intel_engine_cs *ring);
+	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
 	 * seen value is good enough. Note that the seqno will always be
 	 * monotonic, even if not coherent.
 	 */
 	u32		(*get_seqno)(struct intel_engine_cs *ring,
 				     bool lazy_coherency);
 	void		(*set_seqno)(struct intel_engine_cs *ring,
 				     u32 seqno);
-	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
+	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
 					       u64 offset, u32 length,
-					       unsigned flags);
+					       unsigned dispatch_flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
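The pattern running through this hunk is the conversion of the engine vfuncs from taking an intel_engine_cs to taking the drm_i915_gem_request being built. A hedged sketch of what that means at a call site; the wrapper function is hypothetical, and req->ring is assumed to be the request's engine backpointer in this era of the driver.

static int example_flush_for_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;	/* assumed backpointer */

	/* Flush and invalidate everything the GPU may have cached. */
	return ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
}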
@@ -213 +241 @@
 	 */
 	struct {
 		u32	sync_seqno[I915_NUM_RINGS-1];

 		union {
 			struct {
 				/* our mbox written by others */
 				u32		wait[I915_NUM_RINGS];
 				/* mboxes this ring signals to */
 				u32		signal[I915_NUM_RINGS];
 			} mbox;
 			u64		signal_ggtt[I915_NUM_RINGS];
 		};

 		/* AKA wait() */
-		int	(*sync_to)(struct intel_engine_cs *ring,
-				   struct intel_engine_cs *to,
+		int	(*sync_to)(struct drm_i915_gem_request *to_req,
+				   struct intel_engine_cs *from,
 				   u32 seqno);
-		int	(*signal)(struct intel_engine_cs *signaller,
+		int	(*signal)(struct drm_i915_gem_request *signaller_req,
 				  /* num_dwords needed by caller */
 				  unsigned int num_dwords);
 	} semaphore;

 	/* Execlists */
 	spinlock_t execlist_lock;
 	struct list_head execlist_queue;
 	struct list_head execlist_retired_req_list;
 	u8 next_context_status_buffer;
 	u32		irq_keep_mask; /* bitmask for interrupts that should not be masked */
-	int		(*emit_request)(struct intel_ringbuffer *ringbuf);
-	int		(*emit_flush)(struct intel_ringbuffer *ringbuf,
+	int		(*emit_request)(struct drm_i915_gem_request *request);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
 				      u32 invalidate_domains,
 				      u32 flush_domains);
-	int		(*emit_bb_start)(struct intel_ringbuffer *ringbuf,
-					 u64 offset, unsigned flags);
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
+					 u64 offset, unsigned dispatch_flags);

 	/**
 	 * List of objects currently involved in rendering from the
 	 * ringbuffer.
 	 *
 	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * flushed, not necessarily primitives. last_read_req
 	 * represents when the rendering involved will be completed.
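The semaphore vfuncs get the same request-based treatment: the waiter is now identified by the request being built (to_req), while the engine being waited on is passed explicitly. A sketch of a call site, with the wrapper name hypothetical and to_req->ring assumed to be the waiting engine:

static int example_wait_for_other_ring(struct drm_i915_gem_request *to_req,
				       struct intel_engine_cs *from,
				       u32 seqno)
{
	struct intel_engine_cs *waiter = to_req->ring;	/* assumed */

	/* Emit a semaphore wait on the waiter's ring until 'from'
	 * has passed 'seqno'. */
	return waiter->semaphore.sync_to(to_req, from, seqno);
}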
@@ -263 +291 @@
 	 * outstanding.
 	 */
 	struct list_head request_list;

 	/**
-	 * Do we have some not yet emitted requests outstanding?
+	 * Seqno of request most recently submitted to request_list.
+	 * Used exclusively by hang checker to avoid grabbing lock while
+	 * inspecting request list.
 	 */
-	struct drm_i915_gem_request *preallocated_lazy_request;
-	u32 outstanding_lazy_seqno;
+	u32 last_submitted_seqno;
+
 	bool gpu_caches_dirty;
-	bool fbc_dirty;

 	wait_queue_head_t irq_queue;
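Why last_submitted_seqno replaces the lazy-request pair: per its comment, the hang checker can compare the engine's completed seqno against the most recently submitted one without taking the lock that guards request_list. A sketch of that check, with the helper name hypothetical:

static bool example_ring_is_idle(struct intel_engine_cs *ring)
{
	/* Force a coherent seqno read; no lock needed for the compare. */
	u32 completed = ring->get_seqno(ring, false);

	return completed == ring->last_submitted_seqno;
}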
@@ -294 +323 @@
 	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

 	/*
 	 * Table of registers allowed in commands that read/write registers.
 	 */
-	const u32	*reg_table;
+	const struct drm_i915_reg_descriptor *reg_table;
 	int		reg_count;

 	/*
 	 * Table of registers allowed in commands that read/write registers, but
 	 * only from the DRM master.
 	 */
-	const u32	*master_reg_table;
+	const struct drm_i915_reg_descriptor *master_reg_table;
 	int		master_reg_count;
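The whitelist tables change from flat u32 arrays of register offsets to descriptor structs. A sketch of the lookup this enables; the addr field is an assumption here, since drm_i915_reg_descriptor is only forward-declared in this header:

static bool example_reg_allowed(const struct drm_i915_reg_descriptor *table,
				int count, u32 offset)
{
	int i;

	for (i = 0; i < count; i++)
		if (table[i].addr == offset)	/* 'addr' field assumed */
			return true;

	return false;
}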
@@ -346 +375 @@
 		idx += I915_NUM_RINGS;

 	return idx;
 }

+static inline void
+intel_flush_status_page(struct intel_engine_cs *ring, int reg)
+{
+	drm_clflush_virt_range(&ring->status_page.page_addr[reg],
+			       sizeof(uint32_t));
+}
+
 static inline u32
 intel_read_status_page(struct intel_engine_cs *ring,
 		       int reg)
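The new intel_flush_status_page() helper pairs with intel_read_status_page() just below it: clflushing the cacheline first forces a fresh read of a DWORD the GPU wrote behind the CPU's back. A sketch of the pairing, with the wrapper name hypothetical:

static u32 example_coherent_status_read(struct intel_engine_cs *ring, int reg)
{
	intel_flush_status_page(ring, reg);	/* drop the stale cacheline */
	return intel_read_status_page(ring, reg);
}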
@@ -374 +410 @@
  * 0x04: ring 0 head pointer
  * 0x05: ring 1 head pointer (915-class)
  * 0x06: ring 2 head pointer (915-class)
  * 0x10-0x1b: Context status DWords (GM45)
  * 0x1f: Last written status offset. (GM45)
+ * 0x20-0x2f: Reserved (Gen6+)
  *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
+ * The area from dword 0x30 to 0x3ff is available for driver usage.
  */
-#define I915_GEM_HWS_INDEX		0x20
-#define I915_GEM_HWS_SCRATCH_INDEX	0x30
+#define I915_GEM_HWS_INDEX		0x30
+#define I915_GEM_HWS_SCRATCH_INDEX	0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
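The index bump is consistent with the newly documented reserved range: with dwords 0x20-0x2f reserved on Gen6+, the first driver-usable slot moves from 0x20 to 0x30, and both defines shift accordingly. The status page itself is just a page of DWORDs the GPU writes; a sketch of reading the seqno slot, with the wrapper name hypothetical:

static u32 example_last_completed_seqno(struct intel_engine_cs *ring)
{
	/* The add-request code stores the request seqno at this index
	 * via MI_STORE_DWORD_INDEX. */
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}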
@@ -385 +422 @@

-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 				     struct intel_ringbuffer *ringbuf);
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-			       struct intel_ringbuffer *ringbuf);
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+void intel_ringbuffer_free(struct intel_ringbuffer *ring);

 void intel_stop_ring_buffer(struct intel_engine_cs *ring);
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

-int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+
+int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
+int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
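The allocation API is reshaped here: intel_alloc_ringbuffer_obj()/intel_destroy_ringbuffer_obj() give way to a create/free pair, with pin-and-map as a separate step. A sketch of the resulting lifecycle under the new prototypes; the ring size, the ERR_PTR return convention, and the wrapper itself are illustrative assumptions:

static struct intel_ringbuffer *
example_ringbuffer_setup(struct intel_engine_cs *engine,
			 struct drm_device *dev)
{
	struct intel_ringbuffer *ringbuf;
	int ret;

	/* 32 pages is an example size, not a requirement. */
	ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ringbuf))		/* assumed error convention */
		return ringbuf;

	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		intel_ringbuffer_free(ringbuf);
		return ERR_PTR(ret);
	}

	return ringbuf;
}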
@@ -398 +437 @@
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
 				   u32 data)
@@ -406 +445 @@
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
 }
 int __intel_ring_space(int head, int tail, int size);
+void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
 int intel_ring_space(struct intel_ringbuffer *ringbuf);
 bool intel_ring_stopped(struct intel_engine_cs *ring);
-void __intel_ring_advance(struct intel_engine_cs *ring);

 int __must_check intel_ring_idle(struct intel_engine_cs *ring);
 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
-int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
-int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
+int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

 void intel_fini_pipe_control(struct intel_engine_cs *ring);
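With intel_ring_begin() moving to the request, the canonical emit sequence becomes request-centric: reserve space, write DWORDs with intel_ring_emit(), then intel_ring_advance() (whose tail-masking body is visible just above). A sketch with an arbitrary two-NOOP payload; the wrapper and the req->ring backpointer are assumptions:

static int example_emit_two_noops(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;	/* assumed */
	int ret;

	ret = intel_ring_begin(req, 2);	/* room for two DWORDs */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);	/* publish the new tail */

	return 0;
}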
@@ -425 +464 @@
 int intel_init_bsd2_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);

 u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
-void intel_ring_setup_status_page(struct intel_engine_cs *ring);

 int init_workarounds_ring(struct intel_engine_cs *ring);

 static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 {
 	return ringbuf->tail;
 }

-static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
-{
-	BUG_ON(ring->outstanding_lazy_seqno == 0);
-	return ring->outstanding_lazy_seqno;
-}
-
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
+/*
+ * Arbitrary size for largest possible 'add request' sequence. The code paths
+ * are complex and variable. Empirical measurement shows that the worst case
+ * is ILK at 136 words. Reserving too much is better than reserving too little
+ * as that allows for corner cases that might have been missed. So the figure
+ * has been rounded up to 160 words.
+ */
+#define MIN_SPACE_FOR_ADD_REQUEST	160
+
+/*
+ * Reserve space in the ring to guarantee that the i915_add_request() call
+ * will always have sufficient room to do its stuff. The request creation
+ * code calls this automatically.
+ */
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
+/* Cancel the reservation, e.g. because the request is being discarded. */
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
+/* Use the reserved space - for use by i915_add_request() only. */
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
+/* Finish with the reserved space - for use by i915_add_request() only. */
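Read together with MIN_SPACE_FOR_ADD_REQUEST above, the reservation calls bracket request construction so that i915_add_request() can never fail for lack of ring space. A sketch of the lifecycle those comments describe; the surrounding function is hypothetical, and the finishing call is assumed to be intel_ring_reserved_space_end(), whose prototype is cut off at the bottom of this view:

static int example_request_space_lifecycle(struct intel_ringbuffer *ringbuf,
					   bool construction_failed)
{
	/* At request-creation time: guarantee the closing add-request
	 * commands will fit. */
	intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);

	if (construction_failed) {
		/* Request is being discarded: release the reservation. */
		intel_ring_reserved_space_cancel(ringbuf);
		return -EIO;
	}

	/* Inside i915_add_request(): dip into the reserved words... */
	intel_ring_reserved_space_use(ringbuf);
	/* ...emit the closing commands, then finish with the space. */
	intel_ring_reserved_space_end(ringbuf);	/* name assumed */

	return 0;
}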