/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
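/**
 * @file intel_batchbuffer.c
 *
 * Commands are accumulated in a malloc'ed CPU shadow of the batch
 * (batch.cpu_map); on flush, the accumulated DWORDs are uploaded into the
 * kernel-visible buffer object with drm_intel_bo_subdata() and submitted
 * to the render ring for execution.
 */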

#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "intel_reg.h"
#include "intel_bufmgr.h"
#include "intel_buffers.h"

static void
intel_batchbuffer_reset(struct intel_context *intel);

void
intel_batchbuffer_init(struct intel_context *intel)
{
   intel_batchbuffer_reset(intel);

   intel->batch.cpu_map = malloc(intel->maxBatchSize);
   intel->batch.map = intel->batch.cpu_map;
}

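/* Release the reference to the batch-before-last, stash the outgoing batch
 * BO in last_bo, and allocate a fresh page-aligned BO to hold the next
 * batch's contents.
 */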
static void
intel_batchbuffer_reset(struct intel_context *intel)
{
   if (intel->batch.last_bo != NULL) {
      drm_intel_bo_unreference(intel->batch.last_bo);
      intel->batch.last_bo = NULL;
   }
   intel->batch.last_bo = intel->batch.bo;

   intel->batch.bo = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
                                        intel->maxBatchSize, 4096);

   intel->batch.reserved_space = BATCH_RESERVED;
   intel->batch.used = 0;
}

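/* Free the CPU shadow map and drop the references on both the current and
 * the previous batch BO.
 */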
void
intel_batchbuffer_free(struct intel_context *intel)
{
   free(intel->batch.cpu_map);
   drm_intel_bo_unreference(intel->batch.last_bo);
   drm_intel_bo_unreference(intel->batch.bo);
}

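/* Decode and print the batch for INTEL_DEBUG=batch using libdrm's decoder.
 * Prefer the contents of the BO itself (which reflect any relocations the
 * kernel applied); fall back to the local CPU copy if mapping the BO fails.
 */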
static void
do_batch_dump(struct intel_context *intel)
{
   struct drm_intel_decode *decode;
   struct intel_batchbuffer *batch = &intel->batch;
   int ret;

   decode = drm_intel_decode_context_alloc(intel->intelScreen->deviceID);
   if (!decode)
      return;

   ret = drm_intel_bo_map(batch->bo, false);
   if (ret == 0) {
      drm_intel_decode_set_batch_pointer(decode,
                                         batch->bo->virtual,
                                         batch->bo->offset,
                                         batch->used);
   } else {
      /* drm_intel_bo_map() returns a negative errno, so negate it for
       * strerror().
       */
      fprintf(stderr,
              "WARNING: failed to map batchbuffer (%s), "
              "dumping uploaded data instead.\n", strerror(-ret));

      drm_intel_decode_set_batch_pointer(decode,
                                         batch->map,
                                         batch->bo->offset,
                                         batch->used);
   }

   drm_intel_decode(decode);

   drm_intel_decode_context_free(decode);

   if (ret == 0) {
      drm_intel_bo_unmap(batch->bo);

      if (intel->vtbl.debug_batch != NULL)
         intel->vtbl.debug_batch(intel);
   }
}

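/* Upload the CPU-accumulated DWORDs into the batch BO and ask the kernel to
 * execute it on the render ring.  Any failure here is fatal to the process.
 */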
/* TODO: Push this whole function into bufmgr.
 */
static int
do_flush_locked(struct intel_context *intel)
{
   struct intel_batchbuffer *batch = &intel->batch;
   int ret = 0;

   ret = drm_intel_bo_subdata(batch->bo, 0, 4*batch->used, batch->map);

   if (!intel->intelScreen->no_hw) {
      if (ret == 0) {
         if (unlikely(INTEL_DEBUG & DEBUG_AUB) && intel->vtbl.annotate_aub)
            intel->vtbl.annotate_aub(intel);
         ret = drm_intel_bo_mrb_exec(batch->bo, 4 * batch->used, NULL, 0, 0,
                                     I915_EXEC_RENDER);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      do_batch_dump(intel);

   if (ret != 0) {
      fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
      exit(1);
   }
   intel->vtbl.new_batch(intel);

   return ret;
}

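/* Finish and submit the current batch: give the driver a chance to emit any
 * deferred state, terminate the batch with MI_BATCH_BUFFER_END (padded with
 * MI_NOOP to an even DWORD count, as the hardware requires), submit it, and
 * reset for the next batch.
 */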
int
_intel_batchbuffer_flush(struct intel_context *intel,
                         const char *file, int line)
{
   int ret;

   if (intel->batch.used == 0)
      return 0;

   if (intel->first_post_swapbuffers_batch == NULL) {
      intel->first_post_swapbuffers_batch = intel->batch.bo;
      drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
              4*intel->batch.used);

   intel->batch.reserved_space = 0;

   if (intel->vtbl.finish_batch)
      intel->vtbl.finish_batch(intel);

   /* Mark the end of the buffer. */
   intel_batchbuffer_emit_dword(intel, MI_BATCH_BUFFER_END);
   if (intel->batch.used & 1) {
      /* Round batchbuffer usage to 2 DWORDs. */
      intel_batchbuffer_emit_dword(intel, MI_NOOP);
   }

   intel_upload_finish(intel);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!intel->no_batch_wrap);

   ret = do_flush_locked(intel);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      fprintf(stderr, "waiting for idle\n");
      drm_intel_bo_wait_rendering(intel->batch.bo);
   }

   /* Reset the buffer. */
   intel_batchbuffer_reset(intel);

   return ret;
}

/* This is the only way buffers get added to the validate list.
 */
bool
intel_batchbuffer_emit_reloc(struct intel_context *intel,
                             drm_intel_bo *buffer,
                             uint32_t read_domains, uint32_t write_domain,
                             uint32_t delta)
{
   int ret;

   ret = drm_intel_bo_emit_reloc(intel->batch.bo, 4*intel->batch.used,
                                 buffer, delta,
                                 read_domains, write_domain);
   assert(ret == 0);
   (void)ret;

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);

   return true;
}

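/* Like intel_batchbuffer_emit_reloc(), but records a fenced relocation, so
 * the kernel sets up a fence register for the target BO (needed for tiled
 * buffers accessed by commands that require fencing on older hardware).
 */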
bool
intel_batchbuffer_emit_reloc_fenced(struct intel_context *intel,
                                    drm_intel_bo *buffer,
                                    uint32_t read_domains,
                                    uint32_t write_domain,
                                    uint32_t delta)
{
   int ret;

   ret = drm_intel_bo_emit_reloc_fence(intel->batch.bo, 4*intel->batch.used,
                                       buffer, delta,
                                       read_domains, write_domain);
   assert(ret == 0);
   (void)ret;

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   intel_batchbuffer_emit_dword(intel, buffer->offset + delta);

   return true;
}

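/* Append a DWORD-multiple blob of data to the batch, flushing first (via
 * intel_batchbuffer_require_space) if it would not fit in the current batch.
 */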
void
intel_batchbuffer_data(struct intel_context *intel,
                       const void *data, GLuint bytes)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(intel, bytes);
   __memcpy(intel->batch.map + intel->batch.used, data, bytes);
   intel->batch.used += bytes >> 2;
}

/* Emit a pipelined flush, either to flush the render and texture caches
 * before reading from an FBO-drawn texture, or to flush so that frontbuffer
 * rendering shows up on the screen under DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
intel_batchbuffer_emit_mi_flush(struct intel_context *intel)
{
   BEGIN_BATCH(1);
   OUT_BATCH(MI_FLUSH);
   ADVANCE_BATCH();
}
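
/* Illustrative sketch (not part of the driver): commands that carry
 * relocations are normally emitted through the BEGIN_BATCH/OUT_BATCH/
 * OUT_RELOC/ADVANCE_BATCH macros, where OUT_RELOC wraps
 * intel_batchbuffer_emit_reloc() above.  SOME_2D_COMMAND, dst_pitch, and
 * dst_bo are hypothetical names used only for the example:
 *
 *    BEGIN_BATCH(3);
 *    OUT_BATCH(SOME_2D_COMMAND | (3 - 2));
 *    OUT_BATCH(dst_pitch);
 *    OUT_RELOC(dst_bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
 *    ADVANCE_BATCH();
 */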