#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

enum {
        RCS = 0x0,
        VCS,
        BCS,
        I915_NUM_RINGS,
};

struct intel_hw_status_page {
        u32 __iomem *page_addr;
        unsigned int gfx_addr;
        struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
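/*
 * Illustrative only (not part of this header's contract): the .c side
 * typically derives the free space in the ring from the hardware HEAD and
 * the software tail along these lines:
 *
 *      ring->head  = I915_READ_HEAD(ring) & HEAD_ADDR;
 *      ring->space = ring->head - (ring->tail + 8);
 *      if (ring->space < 0)
 *              ring->space += ring->size;
 */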
struct intel_ring_buffer {
        const char *name;
        enum intel_ring_id {
                RING_RENDER = 0x1,
                RING_BSD = 0x2,
                RING_BLT = 0x4,
        } id;
        u32 mmio_base;
        void __iomem *virtual_start;
        struct drm_device *dev;
        struct drm_i915_gem_object *obj;

        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;
        struct intel_hw_status_page status_page;

        spinlock_t irq_lock;
        u32 irq_refcount;
        u32 irq_mask;
        u32 irq_seqno;          /* last seq seen at irq time */
        u32 trace_irq_seqno;
        u32 waiting_seqno;
        u32 sync_seqno[I915_NUM_RINGS-1];
        bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
        void (*irq_put)(struct intel_ring_buffer *ring);

        int (*init)(struct intel_ring_buffer *ring);

        void (*write_tail)(struct intel_ring_buffer *ring,
                           u32 value);
        int __must_check (*flush)(struct intel_ring_buffer *ring,
                                  u32 invalidate_domains,
                                  u32 flush_domains);
        int (*add_request)(struct intel_ring_buffer *ring,
                           u32 *seqno);
        u32 (*get_seqno)(struct intel_ring_buffer *ring);
        int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
                                   u32 offset, u32 length);
        void (*cleanup)(struct intel_ring_buffer *ring);
        int (*sync_to)(struct intel_ring_buffer *ring,
                       struct intel_ring_buffer *to,
                       u32 seqno);

        u32 semaphore_register[3]; /* our mbox written by others */
        u32 signal_mbox[2];        /* mboxes this ring signals to */

        /**
         * List of objects currently involved in rendering from the
         * ringbuffer.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives. last_rendering_seqno
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head request_list;

        /**
         * List of objects currently pending a GPU write flush.
         *
         * All elements on this list will belong to either the
         * active_list or the flushing_list; last_rendering_seqno can
         * be used to differentiate between the two.
         */
        struct list_head gpu_write_list;

        /**
         * Do we have some not yet emitted requests outstanding?
         */
        u32 outstanding_lazy_request;

        wait_queue_head_t irq_queue;
        drm_local_map_t map;

        void *private;
};
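/*
 * Sketch of how the sync fields above are consumed (hedged; the real call
 * site lives in the GEM execbuffer code, not in this header). Before ring
 * "to" reads a buffer last written by ring "from", the driver roughly does:
 *
 *      idx = intel_ring_sync_index(from, to);
 *      if (seqno > from->sync_seqno[idx]) {
 *              ret = to->sync_to(to, from, seqno);
 *              if (ret == 0)
 *                      from->sync_seqno[idx] = seqno;
 *      }
 *
 * On hardware with semaphores this becomes an MI_SEMAPHORE_MBOX wait rather
 * than a CPU-side stall.
 */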
static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
                      struct intel_ring_buffer *other)
{
        int idx;

        /*
         * cs -> 0 = vcs, 1 = bcs
         * vcs -> 0 = bcs, 1 = cs,
         * bcs -> 0 = cs, 1 = vcs.
         */

        idx = (other - ring) - 1;
        if (idx < 0)
                idx += I915_NUM_RINGS;

        return idx;
}

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
                       int reg)
{
        return ioread32(ring->status_page.page_addr + reg);
}
/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
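/*
 * Illustrative consumer of the two indices above: a ring's ->get_seqno hook
 * generally just reads back the breadcrumb the GPU wrote into the status
 * page, e.g.
 *
 *      return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *
 * while the legacy DRI path polls READ_BREADCRUMB(dev_priv).
 */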
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
        return intel_wait_ring_buffer(ring, ring->size - 8);
}

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct intel_ring_buffer *ring,
                                   u32 data)
{
        iowrite32(data, ring->virtual_start + ring->tail);
        ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);
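/*
 * Typical emit sequence (illustrative, error handling trimmed): callers
 * reserve space with intel_ring_begin(), write dwords with intel_ring_emit()
 * and then kick the hardware with intel_ring_advance():
 *
 *      ret = intel_ring_begin(ring, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(ring, MI_FLUSH);
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_advance(ring);
 *
 * intel_ring_begin() takes the number of dwords that will be emitted and may
 * sleep waiting for ring space.
 */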
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
        if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
                ring->trace_irq_seqno = seqno;
}
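/*
 * Pairing note (assumption based on the tracing path, hedged): when
 * i915_trace_irq_get() succeeds it leaves the ring IRQ referenced; the
 * matching ring->irq_put() is expected to happen once requests up to
 * trace_irq_seqno have been retired, at which point trace_irq_seqno is
 * reset to 0.
 */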
/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */