--- intel_ringbuffer.h	(Rev 2352)
+++ intel_ringbuffer.h	(Rev 3031)
@@ -1,15 +1,8 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
-enum {
-        RCS = 0x0,
-        VCS,
-        BCS,
-        I915_NUM_RINGS,
-};
-
 struct intel_hw_status_page {
-        u32 __iomem *page_addr;
+        u32 *page_addr;
         unsigned int gfx_addr;
         struct drm_i915_gem_object *obj;
 };
@@ -34,12 +27,13 @@
 #define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
 
 struct intel_ring_buffer {
         const char *name;
         enum intel_ring_id {
-                RING_RENDER = 0x1,
-                RING_BSD = 0x2,
-                RING_BLT = 0x4,
+                RCS = 0x0,
+                VCS,
+                BCS,
         } id;
+#define I915_NUM_RINGS 3
         u32 mmio_base;
         void __iomem *virtual_start;
         struct drm_device *dev;
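
Worth noting on this hunk: moving RCS/VCS/BCS from a standalone enum into intel_ring_id, with consecutive values instead of the old bitmask constants, lets ring->id double as an array index, while I915_NUM_RINGS sizes arrays such as sync_seqno[] below. A minimal user-space model of that indexing pattern (the struct and names here are illustrative, not the driver's):

    #include <stdio.h>

    enum ring_id { RCS = 0, VCS, BCS };
    #define NUM_RINGS 3

    struct ring {
            enum ring_id id;
            const char *name;
    };

    int main(void)
    {
            struct ring rings[NUM_RINGS] = {
                    { RCS, "render" }, { VCS, "bsd" }, { BCS, "blt" },
            };

            /* Consecutive ids index straight into the per-device array. */
            for (int i = 0; i < NUM_RINGS; i++)
                    printf("ring[%d] = %s\n", rings[i].id, rings[i].name);
            return 0;
    }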
@@ -50,14 +44,21 @@
         int space;
         int size;
         int effective_size;
         struct intel_hw_status_page status_page;
 
-        spinlock_t irq_lock;
-        u32 irq_refcount;
-        u32 irq_mask;
-        u32 irq_seqno;          /* last seq seem at irq time */
+        /** We track the position of the requests in the ring buffer, and
+         * when each is retired we increment last_retired_head as the GPU
+         * must have finished processing the request and so we know we
+         * can advance the ringbuffer up to that position.
+         *
+         * last_retired_head is set to -1 after the value is consumed so
+         * we can detect new retirements.
+         */
+        u32 last_retired_head;
+
+        u32 irq_refcount;       /* protected by dev_priv->irq_lock */
+        u32 irq_enable_mask;    /* bitmask to enable ring interrupt */
         u32 trace_irq_seqno;
-        u32 waiting_seqno;
         u32 sync_seqno[I915_NUM_RINGS-1];
         bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
         void (*irq_put)(struct intel_ring_buffer *ring);
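
The new last_retired_head comment describes a consume-and-reset handshake: retirement records how far the GPU has provably progressed, and the ring code later advances its head to that point, writing -1 back so the next retirement is detectable. A rough user-space sketch of that handshake (simplified; wrap-around and the real head register are ignored, and the function names are mine):

    #include <stdio.h>
    #include <stdint.h>

    struct ring {
            uint32_t head, tail;
            int32_t last_retired_head;      /* -1 means "nothing new retired" */
    };

    /* Called when a request completes: remember where its commands ended. */
    static void retire_request(struct ring *r, uint32_t request_head)
    {
            r->last_retired_head = (int32_t)request_head;
    }

    /* Called when more ring space is needed: consume the retired position. */
    static void reclaim_space(struct ring *r)
    {
            if (r->last_retired_head != -1) {
                    r->head = (uint32_t)r->last_retired_head;
                    r->last_retired_head = -1;      /* detect the next retirement */
            }
    }

    int main(void)
    {
            struct ring r = { .head = 0, .tail = 256, .last_retired_head = -1 };

            retire_request(&r, 128);
            reclaim_space(&r);
            printf("head advanced to %u\n", r.head); /* 128 */
            return 0;
    }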
@@ -69,11 +70,18 @@
         int __must_check (*flush)(struct intel_ring_buffer *ring,
                                   u32 invalidate_domains,
                                   u32 flush_domains);
         int (*add_request)(struct intel_ring_buffer *ring,
                            u32 *seqno);
-        u32 (*get_seqno)(struct intel_ring_buffer *ring);
+        /* Some chipsets are not quite as coherent as advertised and need
+         * an expensive kick to force a true read of the up-to-date seqno.
+         * However, the up-to-date seqno is not always required and the last
+         * seen value is good enough. Note that the seqno will always be
+         * monotonic, even if not coherent.
+         */
+        u32 (*get_seqno)(struct intel_ring_buffer *ring,
+                         bool lazy_coherency);
         int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
                                    u32 offset, u32 length);
         void (*cleanup)(struct intel_ring_buffer *ring);
         int (*sync_to)(struct intel_ring_buffer *ring,
                        struct intel_ring_buffer *to,
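
The block comment above get_seqno() motivates the new lazy_coherency argument: a coherent read may require an expensive kick, but a possibly stale, still monotonic value is often enough. A hedged sketch of how such a split could look (coherency_kick() is a stand-in of my own, not a real driver function):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    static uint32_t hws_seqno = 42; /* stands in for the hardware status page */

    /* Stub for the expensive flush that forces the status page up to date. */
    static void coherency_kick(void)
    {
            hws_seqno = 43; /* pretend the kick made a newer value visible */
    }

    static uint32_t get_seqno(bool lazy_coherency)
    {
            if (!lazy_coherency)
                    coherency_kick();
            return hws_seqno;       /* monotonic either way, maybe stale if lazy */
    }

    int main(void)
    {
            printf("lazy:   %u\n", get_seqno(true));        /* cheap, maybe stale */
            printf("forced: %u\n", get_seqno(false));       /* pays for the kick */
            return 0;
    }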
@@ -98,25 +106,35 @@
          * outstanding.
          */
         struct list_head request_list;
 
-        /**
-         * List of objects currently pending a GPU write flush.
-         *
-         * All elements on this list will belong to either the
-         * active_list or flushing_list, last_rendering_seqno can
-         * be used to differentiate between the two elements.
-         */
-        struct list_head gpu_write_list;
-
         /**
          * Do we have some not yet emitted requests outstanding?
         */
         u32 outstanding_lazy_request;
+        bool gpu_caches_dirty;
 
         wait_queue_head_t irq_queue;
-        drm_local_map_t map;
+
+        /**
+         * Do an explicit TLB flush before MI_SET_CONTEXT
+         */
+        bool itlb_before_ctx_switch;
+        struct i915_hw_context *default_context;
+        struct drm_i915_gem_object *last_context_obj;
 
         void *private;
 };
 
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+        return ring->obj != NULL;
+}
+
+static inline unsigned
+intel_ring_flag(struct intel_ring_buffer *ring)
+{
+        return 1 << ring->id;
+}
+
 static inline u32
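
The two new inline helpers codify common idioms: a ring counts as initialized once its backing object exists, and intel_ring_flag() converts the index-style id back into a single bit for masks. A small user-space illustration of building a mask of usable rings (names and the dummy object are illustrative):

    #include <stdio.h>

    enum ring_id { RCS = 0, VCS, BCS };
    #define NUM_RINGS 3

    struct ring { enum ring_id id; void *obj; };

    static int ring_initialized(const struct ring *r) { return r->obj != NULL; }
    static unsigned ring_flag(const struct ring *r) { return 1u << r->id; }

    int main(void)
    {
            int dummy;
            struct ring rings[NUM_RINGS] = {
                    { RCS, &dummy }, { VCS, &dummy }, { BCS, NULL }, /* BCS absent */
            };
            unsigned mask = 0;

            /* Collect a bitmask of the rings that are actually usable. */
            for (int i = 0; i < NUM_RINGS; i++)
                    if (ring_initialized(&rings[i]))
                            mask |= ring_flag(&rings[i]);

            printf("active ring mask: 0x%x\n", mask); /* 0x3: RCS | VCS */
            return 0;
    }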
@@ -140,9 +158,11 @@
 
 static inline u32
 intel_read_status_page(struct intel_ring_buffer *ring,
                        int reg)
 {
-        return ioread32(ring->status_page.page_addr + reg);
+        /* Ensure that the compiler doesn't optimize away the load. */
+        barrier();
+        return ring->status_page.page_addr[reg];
 }
 
 /**
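
Since page_addr is now a plain pointer rather than __iomem, the status page is read as ordinary memory, and nothing tells the compiler that the GPU may change it between reads; the barrier() is what forces a fresh load. A minimal user-space analogue, spelling barrier() as the usual GCC memory clobber (a real writer would be another thread or the device):

    #include <stdio.h>
    #include <stdint.h>

    #define barrier() __asm__ __volatile__("" ::: "memory")

    static uint32_t status_page[1024];      /* stands in for the GPU status page */

    static uint32_t read_status_page(int reg)
    {
            /* Force the compiler to reload rather than reuse a cached value. */
            barrier();
            return status_page[reg];
    }

    int main(void)
    {
            status_page[0x20] = 7;  /* a writer (GPU, other thread) would do this */
            printf("seqno slot: %u\n", read_status_page(0x20));
            return 0;
    }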
@@ -158,12 +178,9 @@
  * 0x10-0x1b: Context status DWords (GM45)
  * 0x1f: Last written status offset. (GM45)
  *
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
-#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX              0x20
-#define I915_BREADCRUMB_INDEX           0x21
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
183 | } |
200 | } |
Line 184... | Line 201... | ||
184 | 201 | ||
Line 185... | Line 202... | ||
185 | void intel_ring_advance(struct intel_ring_buffer *ring); |
202 | void intel_ring_advance(struct intel_ring_buffer *ring); |
- | 203 | ||
- | 204 | u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); |
|
Line 186... | Line 205... | ||
186 | 205 | int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); |
|
187 | u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); |
206 | int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); |
188 | 207 | ||
Line 189... | Line 208... | ||
189 | int intel_init_render_ring_buffer(struct drm_device *dev); |
208 | int intel_init_render_ring_buffer(struct drm_device *dev); |
190 | int intel_init_bsd_ring_buffer(struct drm_device *dev); |
209 | int intel_init_bsd_ring_buffer(struct drm_device *dev); |
Line -... | Line 210... | ||
- | 210 | int intel_init_blt_ring_buffer(struct drm_device *dev); |
|
- | 211 | ||
- | 212 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); |
|
- | 213 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); |
|
- | 214 | ||
191 | int intel_init_blt_ring_buffer(struct drm_device *dev); |
215 | static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) |
192 | 216 | { |
|
193 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); |
217 | return ring->tail; |
194 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); |
218 | } |
195 | 219 |
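
The new intel_ring_get_tail() helper gives callers a sanctioned snapshot of the software tail, for example to stamp a request with the position where its commands end, the kind of value that last_retired_head later reports back on retirement. A user-space sketch of that pairing (the request struct is hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    struct ring { uint32_t head, tail; };
    struct request { uint32_t seqno, tail; };   /* hypothetical, for illustration */

    static uint32_t ring_get_tail(const struct ring *r)
    {
            return r->tail;
    }

    int main(void)
    {
            struct ring r = { .head = 0, .tail = 0 };
            struct request rq;

            r.tail = 64;                    /* pretend commands were emitted */
            rq.seqno = 1;
            rq.tail = ring_get_tail(&r);    /* where this request's commands end */

            /* On retirement the driver can advance head up to rq.tail. */
            printf("request %u ends at tail %u\n", rq.seqno, rq.tail);
            return 0;
    }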