/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Zou Nan hai
 *    Xiang Hai hao
 *
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

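/* Free space left in the ring, in bytes.  The buffer is circular, so the
 * writable region runs from tail up to head; a slack of
 * I915_RING_FREE_SPACE bytes is kept so that the tail never quite catches
 * up with the head (head == tail means "empty" to the hardware).
 */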
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += ring->size;
	return space;
}

static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
		       u32	invalidate_domains,
		       u32	flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
		       u32	invalidate_domains,
		       u32	flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
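	/* The dummy post-sync write is aimed 128 bytes into the scratch
	 * page, a separate cacheline from offset 0; the value written is
	 * never read back, only the write's ordering effects matter. */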
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

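/* Helper for the WaFbcNukeOn3DBlt:ivb/hsw workaround: when FBC state is
 * dirty, write the given value into MSG_FBC_REND_STATE with an LRI so the
 * FBC hardware re-reads the render state. */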
static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
{
	int ret;

	if (!ring->fbc_dirty)
		return 0;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;
	intel_ring_emit(ring, MI_NOOP);
	/* WaFbcNukeOn3DBlt:ivb/hsw */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, value);
	intel_ring_advance(ring);

	ring->fbc_dirty = false;
	return 0;
}

static int
gen7_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(ring);
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	if (flush_domains)
		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

	return 0;
}

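/* Writing the TAIL register kicks the command streamer: the hardware will
 * execute everything between the old tail and the new one. */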
static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
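	/* On gen4+ the register apparently takes a wider physical address:
	 * bits 35:32 of the bus address are folded into bits 7:4 of the
	 * value written to HWS_PGA below. */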
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	int ret = 0;
	u32 head;

	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_get(dev_priv);

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(ring);
	else
		ring_setup_phys_status_page(ring);

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		ret = -EIO;
		goto out;
	}

	ring->head = I915_READ_HEAD(ring);
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring_space(ring);
	ring->last_retired_head = -1;

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_put(dev_priv);

	return ret;
}

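/* Allocate and pin a 4KiB scratch page in the GGTT.  PIPE_CONTROL
 * post-sync writes are aimed at it, and pc_render_get_seqno() reads the
 * seqno back through its CPU mapping. */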
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	int ret;

	if (ring->scratch.obj)
		return 0;

	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
	if (ring->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);

	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
	if (ret)
		goto err_unref;

	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
	ring->scratch.cpu_page = (void*)MapIoMem((addr_t)sg_page(ring->scratch.obj->pages->sgl),4096, PG_SW);
	if (ring->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 ring->name, ring->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_unpin(ring->scratch.obj);
err_unref:
	drm_gem_object_unreference(&ring->scratch.obj->base);
err:
	return ret;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));

	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset.  LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

		/* This is not explicitly set for GEN6, so read the register.
		 * see intel_ring_mi_set_context() for why we care.
		 * TODO: consider explicitly setting the bit for GEN5
		 */
		ring->itlb_before_ctx_switch =
			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_GPU_CACHE(dev))
		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	if (ring->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
//		kunmap(sg_page(ring->scratch.obj->pages->sgl));
		i915_gem_object_unpin(ring->scratch.obj);
	}

	drm_gem_object_unreference(&ring->scratch.obj->base);
	ring->scratch.obj = NULL;
}

static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 mmio_offset)
{
/* NB: In order to be able to do semaphore MBOX updates for varying number
 * of rings, it's easiest if we round up each individual update to a
 * multiple of 2 (since ring updates must always be a multiple of 2)
 * even though the actual update only requires 3 dwords.
 */
#define MBOX_UPDATE_DWORDS 4
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, mmio_offset);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	intel_ring_emit(ring, MI_NOOP);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *useless;
	int i, ret;

	ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
				      MBOX_UPDATE_DWORDS) +
				      4);
	if (ret)
		return ret;
#undef MBOX_UPDATE_DWORDS

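	/* Signal every other ring's sync mailbox with the new seqno; a
	 * ring's slot for itself is GEN6_NOSYNC and is skipped. */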
	for_each_ring(useless, dev_priv, i) {
		u32 mbox_reg = ring->signal_mbox[i];
		if (mbox_reg != GEN6_NOSYNC)
			update_mboxes(ring, mbox_reg);
	}

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
	       struct intel_ring_buffer *signaller,
	       u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(signaller->semaphore_register[waiter->id] ==
		MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter,
				dw1 |
				signaller->semaphore_register[waiter->id]);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

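/* Emit a PIPE_CONTROL that performs a dummy qword write to the given
 * scratch address with a depth stall; used below to flush the PIPE_NOTIFY
 * write buffers out to memory. */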
#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
do {									\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
		 PIPE_CONTROL_DEPTH_STALL);				\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
	intel_ring_emit(ring__, 0);					\
	intel_ring_emit(ring__, 0);					\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (!lazy_coherency)
		intel_ring_get_active_head(ring);
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	ring->scratch.cpu_page[0] = seqno;
}

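/* Interrupt enabling is refcounted per ring: the first get unmasks the
 * ring's interrupt, the last put masks it again. */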
static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0)
		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0)
		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

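/* Program the hardware with the GTT address of the ring's status page;
 * seqnos written via MI_STORE_DWORD_INDEX land in this page. */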
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);

	/* Flush the TLB for this page */
	if (INTEL_INFO(dev)->gen >= 6) {
		u32 reg = RING_INSTPM(ring->mmio_base);
		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  ring->name);
	}
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32     invalidate_domains,
	       u32     flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	/* It looks like we need to prevent the gt from suspending while waiting
	 * for a notify irq, otherwise irqs seem to get lost on at least the
	 * blt/bsd rings on ivb. */
	gen6_gt_force_wake_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		else
			I915_WRITE_IMR(ring, ~0);
		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	gen6_gt_force_wake_put(dev_priv);
}

static bool
hsw_vebox_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
hsw_vebox_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 length,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
				u32 offset, u32 len,
				unsigned flags)
{
	int ret;

	if (flags & I915_DISPATCH_PINNED) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	} else {
		u32 cs_offset = ring->scratch.gtt_offset;

		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(ring, 9+3);
		if (ret)
			return ret;
		/* Blit the batch (which has now all relocs applied) to the stable batch
		 * scratch bo area (so that the CS never stumbles over its tlb
		 * invalidation bug) ... */
		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
				XY_SRC_COPY_BLT_WRITE_ALPHA |
				XY_SRC_COPY_BLT_WRITE_RGB);
		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);
		intel_ring_emit(ring, MI_FLUSH);

		/* ... and execute it. */
		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
		intel_ring_emit(ring, cs_offset + len - 8);
		intel_ring_advance(ring);
	}

	return 0;
}

static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

//	kunmap(sg_page(obj->pages->sgl));
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
	ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW);
	if (ring->status_page.page_addr == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

static int init_phys_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!dev_priv->status_page_dmah) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
		if (!dev_priv->status_page_dmah)
			return -ENOMEM;
	}

	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	ring->size = 32 * PAGE_SIZE;
	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));

	init_waitqueue_head(&ring->irq_queue);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	} else {
		BUG_ON(ring->id != RCS);
		ret = init_phys_status_page(ring);
		if (ret)
			return ret;
	}

	obj = NULL;
	if (!HAS_LLC(dev))
		obj = i915_gem_object_create_stolen(dev, ring->size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto err_unpin;

	ring->virtual_start =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   ring->size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	iounmap(ring->virtual_start);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
//	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	iounmap(ring->virtual_start);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

//	cleanup_status_page(ring);
}

static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	ret = i915_wait_seqno(ring, seqno);
	if (!ret)
		i915_gem_retire_requests_ring(ring);

	return ret;
}

static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	i915_gem_retire_requests_ring(ring);

	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		int space;

		if (request->tail == -1)
			continue;

		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
		if (space < 0)
			space += ring->size;
		if (space >= n) {
			seqno = request->seqno;
			break;
		}

		/* Consume this request in case we need more space than
		 * is available and so need to prevent a race between
		 * updating last_retired_head and direct reads of
		 * I915_RING_HEAD. It also provides a nice sanity check.
		 */
		request->tail = -1;
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = intel_ring_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	if (WARN_ON(ring->last_retired_head == -1))
		return -ENOSPC;

	ring->head = ring->last_retired_head;
	ring->last_retired_head = -1;
	ring->space = ring_space(ring);
	if (WARN_ON(ring->space < n))
		return -ENOSPC;

	return 0;
}

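/* Wait until at least n bytes are free in the ring: first try to reclaim
 * space by retiring completed requests, then fall back to polling the
 * hardware HEAD (with 1ms sleeps) until enough space opens up. */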
static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	trace_i915_ring_wait_begin(ring);
	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = GetTimerTicks() + 60 * HZ;

	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		msleep(1);

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			return ret;
	} while (!time_after(GetTimerTicks(), end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

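/* Fill the unused tail of the ring with MI_NOOPs and wrap back to the
 * start, so that a following emission never straddles the buffer end. */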
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	uint32_t __iomem *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = ring_wait_for_space(ring, rem);
		if (ret)
			return ret;
	}

	virt = ring->virtual_start + ring->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

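/*
 * Illustrative note (not from the original source): wrapping pads the gap
 * between the current tail and the end of the buffer with MI_NOOPs so the
 * hardware never parses a command that straddles the wrap point; emission
 * then restarts at offset 0. E.g., with a hypothetical ring->size of 0x2000
 * and ring->tail at 0x1ff8, two MI_NOOP dwords are written before the tail
 * is reset to 0.
 */
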
int intel_ring_idle(struct intel_ring_buffer *ring)
{
	u32 seqno;
	int ret;

	/* We need to add any requests required to flush the objects and ring */
	if (ring->outstanding_lazy_request) {
		ret = i915_add_request(ring, NULL);
		if (ret)
			return ret;
	}

	/* Wait upon the last request to be completed */
	if (list_empty(&ring->request_list))
		return 0;

	seqno = list_entry(ring->request_list.prev,
			   struct drm_i915_gem_request,
			   list)->seqno;

	return i915_wait_seqno(ring, seqno);
}

static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
	if (ring->outstanding_lazy_request)
		return 0;

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
}

static int __intel_ring_begin(struct intel_ring_buffer *ring,
			      int bytes)
{
	int ret;

	if (unlikely(ring->tail + bytes > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < bytes)) {
		ret = ring_wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= bytes;
	return 0;
}

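/*
 * Illustrative note (not from the original source): effective_size can be
 * smaller than size (the non-KMS init path below trims 128 bytes on
 * i830/845), so the wrap above fires before the tail reaches the physical
 * end of the buffer, leaving headroom for the hardware's command prefetch.
 */
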
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = intel_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
}

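/*
 * Illustrative sketch (not part of the original file): every command
 * emission in this file follows the begin/emit/advance pattern that
 * intel_ring_begin() opens. Reserving the dwords up front means the emits
 * cannot fail, and intel_ring_advance() publishes the new tail to the
 * hardware in a single step. A minimal hypothetical emitter:
 */
#if 0
static int example_emit_noops(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);		/* write into the reservation */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);		/* expose the new tail to the GPU */

	return 0;
}
#endif
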
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	BUG_ON(ring->outstanding_lazy_request);

	if (INTEL_INFO(ring->dev)->gen >= 6) {
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
		if (HAS_VEBOX(ring->dev))
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
	}

	ring->set_seqno(ring, seqno);
	ring->hangcheck.seqno = seqno;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	ring->tail &= ring->size - 1;
	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
		return;
	ring->write_tail(ring, ring->tail);
}

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}

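/*
 * Illustrative note (not from the original source): the PSMI control
 * register is a "masked" register, so each write must set the write-enable
 * mask in the upper 16 bits for the bits it changes in the lower 16. If the
 * macros follow the usual i915 convention, the expansion is roughly:
 *
 *	_MASKED_BIT_ENABLE(a)  -> ((a) << 16) | (a)
 *	_MASKED_BIT_DISABLE(a) -> ((a) << 16)
 *
 * which is why the enable/disable pair above can toggle a single bit
 * without a read-modify-write cycle.
 */
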
static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
			       u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			     u32 offset, u32 len,
			     unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len,
			      unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	struct drm_device *dev = ring->dev;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
			MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	if (IS_GEN7(dev) && flush)
		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);

	return 0;
}

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
		ring->signal_mbox[RCS] = GEN6_NOSYNC;
		ring->signal_mbox[VCS] = GEN6_VRSYNC;
		ring->signal_mbox[BCS] = GEN6_BRSYNC;
		ring->signal_mbox[VECS] = GEN6_VERSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->set_seqno = pc_render_set_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	if (IS_HASWELL(dev))
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	/* Workaround batchbuffer to combat CS tlb bug. */
	if (HAS_BROKEN_CS_TLB(dev)) {
		struct drm_i915_gem_object *obj;
		int ret;

		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return -ENOMEM;
		}

		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->scratch.obj = obj;
		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	return intel_init_ring_buffer(dev, ring);
}

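/*
 * Illustrative note (not from the original source): the init functions in
 * this file all follow the same shape - fill in the per-generation vfuncs
 * (flush, add_request, get/set_seqno, irq_get/put, dispatch_execbuffer)
 * and then hand the descriptor to intel_init_ring_buffer() to allocate the
 * ring itself. The most specific match is assigned first (e.g. Haswell's
 * dispatch above), so it wins over the generic gen6+ path.
 */
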
#if 0
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		return -ENODEV;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	ring->virtual_start = ioremap_wc(start, size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_status_page(ring);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_bsd_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
		ring->signal_mbox[RCS] = GEN6_RVSYNC;
		ring->signal_mbox[VCS] = GEN6_NOSYNC;
		ring->signal_mbox[BCS] = GEN6_BVSYNC;
		ring->signal_mbox[VECS] = GEN6_VEVSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
	ring->irq_get = gen6_ring_get_irq;
	ring->irq_put = gen6_ring_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
	ring->signal_mbox[RCS] = GEN6_RBSYNC;
	ring->signal_mbox[VCS] = GEN6_VBSYNC;
	ring->signal_mbox[BCS] = GEN6_NOSYNC;
	ring->signal_mbox[VECS] = GEN6_VEBSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;

	ring->mmio_base = VEBOX_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	ring->irq_get = hsw_vebox_get_irq;
	ring->irq_put = hsw_vebox_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->signal_mbox[RCS] = GEN6_RVESYNC;
	ring->signal_mbox[VCS] = GEN6_VVESYNC;
	ring->signal_mbox[BCS] = GEN6_BVESYNC;
	ring->signal_mbox[VECS] = GEN6_NOSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}

int
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
{
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}
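
/*
 * Illustrative note (not from the original source): a typical submission
 * path would call intel_ring_invalidate_all_caches() before emitting a
 * batch (so the GPU sees up-to-date data) and intel_ring_flush_all_caches()
 * once rendering has dirtied the caches again; both are cheap no-ops when
 * gpu_caches_dirty is already in the right state.
 */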