/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Zou Nan hai
 *    Xiang Hai hao
 *
 */
#define iowrite32(v, addr)	writel((v), (addr))
#define ioread32(addr)		readl(addr)

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};

static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += ring->size;
	return space;
}

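/*
 * Illustrative arithmetic for ring_space() (hypothetical values, not from
 * the source): with size = 0x2000, head = 0x100, tail = 0x1f00 and
 * I915_RING_FREE_SPACE assumed to be 0x20, the raw difference
 * 0x100 - (0x1f00 + 0x20) = -0x1e20 is negative, so ring->size is added
 * back, leaving 0x1e0 bytes of usable space.
 */
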
static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

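/*
 * gen2_render_ring_flush() above is the simplest instance of the emit
 * idiom used throughout this file: intel_ring_begin() reserves the exact
 * number of dwords (checking for a wedged GPU and wrapping the buffer if
 * needed), intel_ring_emit() writes one dword at the tail, and
 * intel_ring_advance() publishes the new tail to the hardware.
 */
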
static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(ring);
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	int ret = 0;
	u32 head;

	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_get(dev_priv);

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, obj->gtt_offset);
	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == obj->gtt_offset &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		ret = -EIO;
		goto out;
	}

	ring->head = I915_READ_HEAD(ring);
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring_space(ring);
	ring->last_retired_head = -1;

out:
	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_put(dev_priv);

	return ret;
}

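/*
 * The wait_for() check in init_ring_common() gives the hardware 50 ms to
 * report RING_VALID with START latched and HEAD back at zero; otherwise
 * the ring is declared dead and -EIO is returned through the out: path,
 * which also drops the force-wake reference taken at entry.
 */
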
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true, false);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = (void *)MapIoMem((addr_t)sg_page(obj->pages->sgl), 4096, PG_SW);
	if (pc->cpu_page == NULL)
		goto err_unpin;

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
//	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 */
	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));

	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset.  LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

		/* This is not explicitly set for GEN6, so read the register.
		 * see intel_ring_mi_set_context() for why we care.
		 * TODO: consider explicitly setting the bit for GEN5
		 */
		ring->itlb_before_ctx_switch =
			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_GPU_CACHE(dev))
		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}

static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, mmio_offset);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	update_mboxes(ring, mbox1_reg);
	update_mboxes(ring, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

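/*
 * Budget note for gen6_add_request(): intel_ring_begin(ring, 10) covers
 * two 3-dword update_mboxes() sequences (MI_LOAD_REGISTER_IMM(1), the
 * mailbox register offset, the new seqno) plus the 4-dword
 * MI_STORE_DWORD_INDEX / MI_USER_INTERRUPT tail.
 */
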
/**
 * gen6_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
	       struct intel_ring_buffer *signaller,
	       u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(signaller->semaphore_register[waiter->id] ==
		MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter,
			dw1 | signaller->semaphore_register[waiter->id]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}

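/*
 * The command emitted above parks the waiter until the value the
 * signaller stored in the selected semaphore mailbox register (via
 * update_mboxes()) exceeds the emitted seqno - which is why seqno is
 * decremented first, turning the hardware's strictly-greater compare
 * into the >= semantics GEM expects.
 */
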
#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {										\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL);					\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);		\
	intel_ring_emit(ring__, 0);						\
	intel_ring_emit(ring__, 0);						\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (!lazy_coherency)
		intel_ring_get_active_head(ring);
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_request);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	/* It looks like we need to prevent the gt from suspending while waiting
	 * for a notify irq, otherwise irqs seem to get lost on at least the
	 * blt/bsd rings on ivb. */
	gen6_gt_force_wake_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
						GEN6_RENDER_L3_PARITY_ERROR));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
		else
			I915_WRITE_IMR(ring, ~0);
		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	gen6_gt_force_wake_put(dev_priv);
}

static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 length,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len,
			 unsigned flags)
{
	int ret;

	if (flags & I915_DISPATCH_PINNED) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	} else {
		struct drm_i915_gem_object *obj = ring->private;
		u32 cs_offset = obj->gtt_offset;

		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(ring, 9+3);
		if (ret)
			return ret;
		/* Blit the batch (which has now all relocs applied) to the stable batch
		 * scratch bo area (so that the CS never stumbles over its tlb
		 * invalidation bug) ... */
		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
				XY_SRC_COPY_BLT_WRITE_ALPHA |
				XY_SRC_COPY_BLT_WRITE_RGB);
		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);
		intel_ring_emit(ring, MI_FLUSH);

		/* ... and execute it. */
		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
		intel_ring_emit(ring, cs_offset + len - 8);
		intel_ring_advance(ring);
	}

	return 0;
}

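/*
 * Sketch of the unpinned path above: the XY_SRC_COPY blit copies the
 * batch into the stable scratch bo in 4096-byte rows
 * (DIV_ROUND_UP(len, 4096) of them), MI_FLUSH drains that copy, and only
 * then does MI_BATCH_BUFFER execute from cs_offset, keeping the CS clear
 * of the i830 TLB-invalidation bug noted in the comment above.
 */
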
static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

//	kunmap(sg_page(obj->pages->sgl));
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true, false);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = (void *)MapIoMem((addr_t)sg_page(obj->pages->sgl), 4096, PG_SW);
	if (ring->status_page.page_addr == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

static int init_phys_hws_pga(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	if (!dev_priv->status_page_dmah) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
		if (!dev_priv->status_page_dmah)
			return -ENOMEM;
	}

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);

	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	ring->size = 32 * PAGE_SIZE;
	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));

	init_waitqueue_head(&ring->irq_queue);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	} else {
		BUG_ON(ring->id != RCS);
		ret = init_phys_hws_pga(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto err_unpin;

	ring->virtual_start =
		ioremap(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
			ring->size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	FreeKernelSpace(ring->virtual_start);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
//	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

//	drm_core_ioremapfree(&ring->map, ring->dev);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

//	cleanup_status_page(ring);
}

static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	int ret;

	ret = i915_wait_seqno(ring, seqno);
	if (!ret)
		i915_gem_retire_requests_ring(ring);

	return ret;
}

static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	i915_gem_retire_requests_ring(ring);

	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		int space;

		if (request->tail == -1)
			continue;

		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
		if (space < 0)
			space += ring->size;
		if (space >= n) {
			seqno = request->seqno;
			break;
		}

		/* Consume this request in case we need more space than
		 * is available and so need to prevent a race between
		 * updating last_retired_head and direct reads of
		 * I915_RING_HEAD. It also provides a nice sanity check.
		 */
		request->tail = -1;
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = intel_ring_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	if (WARN_ON(ring->last_retired_head == -1))
		return -ENOSPC;

	ring->head = ring->last_retired_head;
	ring->last_retired_head = -1;
	ring->space = ring_space(ring);
	if (WARN_ON(ring->space < n))
		return -ENOSPC;

	return 0;
}

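/*
 * intel_ring_wait_request() first tries the cheap path: reuse the head
 * position recorded when requests were retired.  Failing that, it walks
 * the outstanding requests for the first one whose completion would free
 * at least n bytes and waits on its seqno, so the CPU blocks on the GPU
 * only as long as strictly necessary.
 */
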
static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	trace_i915_ring_wait_begin(ring);
	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = GetTimerTicks() + 60 * HZ;

	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		msleep(1);

		ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
		if (ret)
			return ret;
	} while (!time_after(GetTimerTicks(), end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	uint32_t __iomem *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = ring_wait_for_space(ring, rem);
		if (ret)
			return ret;
	}

	virt = ring->virtual_start + ring->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

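/*
 * The wrap handler above pads everything from tail to the end of the
 * buffer with MI_NOOP and restarts emission at offset 0; presumably so
 * that no single command ever straddles the wrap point
 * (intel_ring_begin() calls it whenever a request would cross
 * effective_size).
 */
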
int intel_ring_idle(struct intel_ring_buffer *ring)
{
	u32 seqno;
	int ret;

	/* We need to add any requests required to flush the objects and ring */
	if (ring->outstanding_lazy_request) {
		ret = i915_add_request(ring, NULL, NULL);
		if (ret)
			return ret;
	}

	/* Wait upon the last request to be completed */
	if (list_empty(&ring->request_list))
		return 0;

	seqno = list_entry(ring->request_list.prev,
			   struct drm_i915_gem_request,
			   list)->seqno;

	return i915_wait_seqno(ring, seqno);
}

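/*
 * Idling is therefore a two-step affair: flush the outstanding lazy
 * request into a real one if needed, then block on the seqno of the
 * newest request in the list (request_list.prev), which by ordering
 * implies every older request has completed too.
 */
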
static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
	if (ring->outstanding_lazy_request)
		return 0;

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
}

int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = intel_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = ring_wait_for_space(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}

void intel_ring_advance(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	ring->tail &= ring->size - 1;
	if (dev_priv->stop_rings & intel_ring_flag(ring))
		return;
	ring->write_tail(ring, ring->tail);
}

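/*
 * intel_ring_advance() masks the software tail back into the ring
 * (ring->size is a power of two) and skips the hardware tail write
 * entirely when the ring is held in the stop_rings debug mask, leaving
 * commands queued in memory without executing them.
 */
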
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

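/*
 * Haswell batch dispatch: batches are run through the PPGTT, and the
 * HSW encoding of the non-secure bit is used for unprivileged batches.
 */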
static int
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			     u32 offset, u32 len,
			     unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

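/*
 * Gen6+ batch dispatch: like the Haswell variant, but without the
 * PPGTT selector and using the i965 encoding of the non-secure bit.
 */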
static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len,
			      unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
			MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

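/*
 * Fill in the render ring vfuncs according to the hardware generation,
 * then hand the ring over to the common initialisation code.
 */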
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
		ring->signal_mbox[0] = GEN6_VRSYNC;
		ring->signal_mbox[1] = GEN6_BRSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	if (IS_HASWELL(dev))
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	/* Workaround batchbuffer to combat CS tlb bug. */
	if (HAS_BROKEN_CS_TLB(dev)) {
		struct drm_i915_gem_object *obj;
		int ret;

		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return -ENOMEM;
		}

		ret = i915_gem_object_pin(obj, 0, true, false);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->private = obj;
	}

	return intel_init_ring_buffer(dev, ring);
}
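
/*
 * Legacy (non-KMS) render ring initialisation, compiled out in this
 * port.
 */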
#if 0
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		return -ENODEV;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	ring->virtual_start = ioremap_wc(start, size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_hws_pga(ring);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

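/*
 * Set up the BSD (video) ring; gen6 additionally needs the tail-write
 * workaround implemented above.
 */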
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
		ring->signal_mbox[0] = GEN6_RVSYNC;
		ring->signal_mbox[1] = GEN6_BVSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

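/*
 * Set up the blitter ring (SandyBridge+), including the semaphore
 * registers and mailboxes used for inter-ring synchronisation.
 */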
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = blt_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
	ring->irq_get = gen6_ring_get_irq;
	ring->irq_put = gen6_ring_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->signal_mbox[0] = GEN6_RBSYNC;
	ring->signal_mbox[1] = GEN6_VBSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

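/*
 * Flush the GPU caches if a previously executed batch left them dirty.
 */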
int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}

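/*
 * Invalidate the GPU caches before a new batch runs, flushing any
 * dirty caches from a previous batch at the same time.
 */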
int
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
{
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}