Go to most recent revision | Details | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
3769 | Serge | 1 | /************************************************************************** |
2 | * |
||
3 | * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas. |
||
4 | * All Rights Reserved. |
||
5 | * |
||
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
||
7 | * copy of this software and associated documentation files (the |
||
8 | * "Software"), to deal in the Software without restriction, including |
||
9 | * without limitation the rights to use, copy, modify, merge, publish, |
||
10 | * distribute, sub license, and/or sell copies of the Software, and to |
||
11 | * permit persons to whom the Software is furnished to do so, subject to |
||
12 | * the following conditions: |
||
13 | * |
||
14 | * The above copyright notice and this permission notice (including the |
||
15 | * next paragraph) shall be included in all copies or substantial portions |
||
16 | * of the Software. |
||
17 | * |
||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
||
19 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
||
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
||
21 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
||
22 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
||
23 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
||
24 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
||
25 | * |
||
26 | **************************************************************************/ |
||
27 | |||
#include <stdio.h>
#include <string.h>
#include <assert.h>

#include "intel_batchbuffer.h"
33 | |||
34 | #define MAX_BATCH_SIZE 0x400000 |
||
35 | |||
36 | static void |
||
37 | intel_batchbuffer_reset(struct intel_batchbuffer *batch, int buffer_size) |
||
38 | { |
||
39 | struct intel_driver_data *intel = batch->intel; |
||
40 | int batch_size = buffer_size; |
||
41 | |||
42 | printf("%s\n", __FUNCTION__); |
||
43 | |||
44 | assert(batch->flag == I915_EXEC_RENDER || |
||
45 | batch->flag == I915_EXEC_BLT || |
||
46 | batch->flag == I915_EXEC_BSD || |
||
47 | batch->flag == I915_EXEC_VEBOX); |
||
48 | |||
49 | dri_bo_unreference(batch->buffer); |
||
50 | batch->buffer = dri_bo_alloc(intel->bufmgr, |
||
51 | "batch buffer", |
||
52 | batch_size, |
||
53 | 0x1000); |
||
54 | assert(batch->buffer); |
||
55 | dri_bo_map(batch->buffer, 1); |
||
56 | assert(batch->buffer->virtual); |
||
57 | batch->map = batch->buffer->virtual; |
||
58 | batch->size = batch_size; |
||
59 | batch->ptr = batch->map; |
||
60 | batch->atomic = 0; |
||
61 | } |
||
62 | |||
63 | static unsigned int |
||
64 | intel_batchbuffer_space(struct intel_batchbuffer *batch) |
||
65 | { |
||
66 | return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map); |
||
67 | } |
||
68 | |||
69 | |||
70 | struct intel_batchbuffer * |
||
71 | intel_batchbuffer_new(struct intel_driver_data *intel, int flag, int buffer_size) |
||
72 | { |
||
73 | struct intel_batchbuffer *batch = calloc(1, sizeof(*batch)); |
||
74 | assert(flag == I915_EXEC_RENDER || |
||
75 | flag == I915_EXEC_BSD || |
||
76 | flag == I915_EXEC_BLT || |
||
77 | flag == I915_EXEC_VEBOX); |
||
78 | |||
79 | printf("%s\n", __FUNCTION__); |
||
80 | |||
81 | if (!buffer_size || buffer_size < BATCH_SIZE) { |
||
82 | buffer_size = BATCH_SIZE; |
||
83 | } |
||
84 | |||
85 | /* the buffer size can't exceed 4M */ |
||
86 | if (buffer_size > MAX_BATCH_SIZE) { |
||
87 | buffer_size = MAX_BATCH_SIZE; |
||
88 | } |
||
89 | |||
90 | batch->intel = intel; |
||
91 | batch->flag = flag; |
||
92 | batch->run = drm_intel_bo_mrb_exec; |
||
93 | intel_batchbuffer_reset(batch, buffer_size); |
||
94 | |||
95 | return batch; |
||
96 | } |
||
97 | |||
98 | void intel_batchbuffer_free(struct intel_batchbuffer *batch) |
||
99 | { |
||
100 | if (batch->map) { |
||
101 | dri_bo_unmap(batch->buffer); |
||
102 | batch->map = NULL; |
||
103 | } |
||
104 | |||
105 | dri_bo_unreference(batch->buffer); |
||
106 | free(batch); |
||
107 | } |
||
108 | |||
/*
 * Terminate the current batch with MI_BATCH_BUFFER_END, submit it to the
 * kernel on the ring selected by batch->flag, and reset the buffer for
 * reuse.  An empty batch is silently skipped.
 */
void
intel_batchbuffer_flush(struct intel_batchbuffer *batch)
{
    unsigned int used = batch->ptr - batch->map;

    if (used == 0) {
        return;
    }

    /*
     * The submitted batch length must be qword (8-byte) aligned.  All
     * emission paths in this file write whole dwords, so 'used' is a
     * multiple of 4 here; (used & 4) == 0 therefore means used % 8 == 0,
     * in which case one zero dword of padding is inserted so that the
     * 4-byte MI_BATCH_BUFFER_END below restores 8-byte alignment.
     */
    if ((used & 4) == 0) {
        *(unsigned int*)batch->ptr = 0;
        batch->ptr += 4;
    }

    *(unsigned int*)batch->ptr = MI_BATCH_BUFFER_END;
    batch->ptr += 4;
    /* Unmap before execution; the reset below maps a fresh buffer. */
    dri_bo_unmap(batch->buffer);
    used = batch->ptr - batch->map;
    /* batch->run is drm_intel_bo_mrb_exec (set in intel_batchbuffer_new);
     * NOTE(review): its return value is ignored — submission failures
     * are silent here. */
    batch->run(batch->buffer, used, 0, 0, 0, batch->flag);
    intel_batchbuffer_reset(batch, batch->size);
}
||
130 | |||
131 | void |
||
132 | intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, unsigned int x) |
||
133 | { |
||
134 | assert(intel_batchbuffer_space(batch) >= 4); |
||
135 | *(unsigned int *)batch->ptr = x; |
||
136 | batch->ptr += 4; |
||
137 | } |
||
138 | |||
139 | void |
||
140 | intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch, dri_bo *bo, |
||
141 | uint32_t read_domains, uint32_t write_domains, |
||
142 | uint32_t delta) |
||
143 | { |
||
144 | assert(batch->ptr - batch->map < batch->size); |
||
145 | dri_bo_emit_reloc(batch->buffer, read_domains, write_domains, |
||
146 | delta, batch->ptr - batch->map, bo); |
||
147 | intel_batchbuffer_emit_dword(batch, bo->offset + delta); |
||
148 | } |
||
149 | |||
150 | void |
||
151 | intel_batchbuffer_require_space(struct intel_batchbuffer *batch, |
||
152 | unsigned int size) |
||
153 | { |
||
154 | assert(size < batch->size - 8); |
||
155 | |||
156 | if (intel_batchbuffer_space(batch) < size) { |
||
157 | intel_batchbuffer_flush(batch); |
||
158 | } |
||
159 | } |
||
160 | |||
161 | void |
||
162 | intel_batchbuffer_data(struct intel_batchbuffer *batch, |
||
163 | void *data, |
||
164 | unsigned int size) |
||
165 | { |
||
166 | assert((size & 3) == 0); |
||
167 | intel_batchbuffer_require_space(batch, size); |
||
168 | |||
169 | assert(batch->ptr); |
||
170 | memcpy(batch->ptr, data, size); |
||
171 | batch->ptr += size; |
||
172 | } |
||
173 | |||
/*
 * Emit a pipeline/cache flush command appropriate for both the GPU
 * generation and the ring this batch targets.  Gen6/Gen7 use
 * PIPE_CONTROL on the render ring and MI_FLUSH_DW on the BLT/VEBOX/BSD
 * rings; earlier generations use the legacy MI_FLUSH command.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
    struct intel_driver_data *intel = batch->intel;

    if (IS_GEN6(intel->device_id) ||
        IS_GEN7(intel->device_id)) {
        if (batch->flag == I915_EXEC_RENDER) {
            /* Render ring: 4-dword PIPE_CONTROL (opcode | extra length). */
            BEGIN_BATCH(batch, 4);
            OUT_BATCH(batch, CMD_PIPE_CONTROL | 0x2);

            if (IS_GEN6(intel->device_id))
                /* Gen6 variant omits the depth-cache (DC) flush bit. */
                OUT_BATCH(batch,
                          CMD_PIPE_CONTROL_WC_FLUSH |
                          CMD_PIPE_CONTROL_TC_FLUSH |
                          CMD_PIPE_CONTROL_NOWRITE);
            else
                OUT_BATCH(batch,
                          CMD_PIPE_CONTROL_WC_FLUSH |
                          CMD_PIPE_CONTROL_TC_FLUSH |
                          CMD_PIPE_CONTROL_DC_FLUSH |
                          CMD_PIPE_CONTROL_NOWRITE);

            /* No post-sync write: address and data dwords are zero. */
            OUT_BATCH(batch, 0);
            OUT_BATCH(batch, 0);
            ADVANCE_BATCH(batch);
        } else {
            if (batch->flag == I915_EXEC_BLT) {
                /* Blitter ring: 4-dword MI_FLUSH_DW, no post-sync write. */
                BEGIN_BLT_BATCH(batch, 4);
                OUT_BLT_BATCH(batch, MI_FLUSH_DW);
                OUT_BLT_BATCH(batch, 0);
                OUT_BLT_BATCH(batch, 0);
                OUT_BLT_BATCH(batch, 0);
                ADVANCE_BLT_BATCH(batch);
            }else if (batch->flag == I915_EXEC_VEBOX) {
                /* Video-enhancement ring: same MI_FLUSH_DW shape. */
                BEGIN_VEB_BATCH(batch, 4);
                OUT_VEB_BATCH(batch, MI_FLUSH_DW);
                OUT_VEB_BATCH(batch, 0);
                OUT_VEB_BATCH(batch, 0);
                OUT_VEB_BATCH(batch, 0);
                ADVANCE_VEB_BATCH(batch);
            } else {
                /* Only BSD remains; additionally invalidate the video
                 * pipeline caches. */
                assert(batch->flag == I915_EXEC_BSD);
                BEGIN_BCS_BATCH(batch, 4);
                OUT_BCS_BATCH(batch, MI_FLUSH_DW | MI_FLUSH_DW_VIDEO_PIPELINE_CACHE_INVALIDATE);
                OUT_BCS_BATCH(batch, 0);
                OUT_BCS_BATCH(batch, 0);
                OUT_BCS_BATCH(batch, 0);
                ADVANCE_BCS_BATCH(batch);
            }
        }
    } else {
        /* Pre-Gen6: single-dword legacy MI_FLUSH, render or BSD only. */
        if (batch->flag == I915_EXEC_RENDER) {
            BEGIN_BATCH(batch, 1);
            OUT_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
            ADVANCE_BATCH(batch);
        } else {
            assert(batch->flag == I915_EXEC_BSD);
            BEGIN_BCS_BATCH(batch, 1);
            OUT_BCS_BATCH(batch, MI_FLUSH | MI_FLUSH_STATE_INSTRUCTION_CACHE_INVALIDATE);
            ADVANCE_BCS_BATCH(batch);
        }
    }
}
||
238 | |||
239 | void |
||
240 | intel_batchbuffer_begin_batch(struct intel_batchbuffer *batch, int total) |
||
241 | { |
||
242 | batch->emit_total = total * 4; |
||
243 | batch->emit_start = batch->ptr; |
||
244 | } |
||
245 | |||
246 | void |
||
247 | intel_batchbuffer_advance_batch(struct intel_batchbuffer *batch) |
||
248 | { |
||
249 | assert(batch->emit_total == (batch->ptr - batch->emit_start)); |
||
250 | } |
||
251 | |||
252 | void |
||
253 | intel_batchbuffer_check_batchbuffer_flag(struct intel_batchbuffer *batch, int flag) |
||
254 | { |
||
255 | if (flag != I915_EXEC_RENDER && |
||
256 | flag != I915_EXEC_BLT && |
||
257 | flag != I915_EXEC_BSD && |
||
258 | flag != I915_EXEC_VEBOX) |
||
259 | return; |
||
260 | |||
261 | if (batch->flag == flag) |
||
262 | return; |
||
263 | |||
264 | intel_batchbuffer_flush(batch); |
||
265 | batch->flag = flag; |
||
266 | } |
||
267 | |||
/* Nonzero when 'size' bytes can be emitted without triggering a flush. */
int
intel_batchbuffer_check_free_space(struct intel_batchbuffer *batch, int size)
{
    return (intel_batchbuffer_space(batch) >= size) ? 1 : 0;
}
||
273 | |||
274 | static void |
||
275 | intel_batchbuffer_start_atomic_helper(struct intel_batchbuffer *batch, |
||
276 | int flag, |
||
277 | unsigned int size) |
||
278 | { |
||
279 | assert(!batch->atomic); |
||
280 | intel_batchbuffer_check_batchbuffer_flag(batch, flag); |
||
281 | intel_batchbuffer_require_space(batch, size); |
||
282 | batch->atomic = 1; |
||
283 | } |
||
284 | |||
285 | void |
||
286 | intel_batchbuffer_start_atomic(struct intel_batchbuffer *batch, unsigned int size) |
||
287 | { |
||
288 | intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_RENDER, size); |
||
289 | } |
||
290 | |||
291 | void |
||
292 | intel_batchbuffer_start_atomic_blt(struct intel_batchbuffer *batch, unsigned int size) |
||
293 | { |
||
294 | intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_BLT, size); |
||
295 | } |
||
296 | |||
297 | void |
||
298 | intel_batchbuffer_start_atomic_bcs(struct intel_batchbuffer *batch, unsigned int size) |
||
299 | { |
||
300 | intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_BSD, size); |
||
301 | } |
||
302 | |||
303 | void |
||
304 | intel_batchbuffer_start_atomic_veb(struct intel_batchbuffer *batch, unsigned int size) |
||
305 | { |
||
306 | intel_batchbuffer_start_atomic_helper(batch, I915_EXEC_VEBOX, size); |
||
307 | } |
||
308 | |||
309 | |||
310 | void |
||
311 | intel_batchbuffer_end_atomic(struct intel_batchbuffer *batch) |
||
312 | { |
||
313 | assert(batch->atomic); |
||
314 | batch->atomic = 0; |
||
315 | } |
||
316 | |||
317 | int |
||
318 | intel_batchbuffer_used_size(struct intel_batchbuffer *batch) |
||
319 | { |
||
320 | return batch->ptr - batch->map; |
||
321 | } |
||
322 | |||
323 | void |
||
324 | intel_batchbuffer_align(struct intel_batchbuffer *batch, unsigned int alignedment) |
||
325 | { |
||
326 | int used = batch->ptr - batch->map; |
||
327 | int pad_size; |
||
328 | |||
329 | assert((alignedment & 3) == 0); |
||
330 | pad_size = ALIGN(used, alignedment) - used; |
||
331 | assert((pad_size & 3) == 0); |
||
332 | assert(intel_batchbuffer_space(batch) >= pad_size); |
||
333 | |||
334 | while (pad_size >= 4) { |
||
335 | intel_batchbuffer_emit_dword(batch, 0); |
||
336 | pad_size -= 4; |
||
337 | } |
||
338 | }>>>> |
||
339 |