Rev 2352 | Rev 3031 | ||
---|---|---|---|
Line 27... | Line 27... | ||
27 | * |
27 | * |
28 | */ |
28 | */ |
29 | #define iowrite32(v, addr) writel((v), (addr)) |
29 | #define iowrite32(v, addr) writel((v), (addr)) |
30 | #define ioread32(addr) readl(addr) |
30 | #define ioread32(addr) readl(addr) |
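The two defines above shim Linux's iowrite32()/ioread32() accessors onto this port's writel()/readl(). A minimal standalone sketch of what such 32-bit MMIO accessors boil down to (the names here are illustrative, not the kernel implementations, which may also add ordering barriers):

    #include <stdint.h>

    /* The volatile-qualified pointer keeps the compiler from caching
     * or eliding the access to the memory-mapped register. */
    static inline void mmio_write32(volatile void *addr, uint32_t v)
    {
        *(volatile uint32_t *)addr = v;
    }

    static inline uint32_t mmio_read32(const volatile void *addr)
    {
        return *(const volatile uint32_t *)addr;
    }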
Line 31... | Line 31... | ||
31 | 31 | ||
32 | #include "drmP.h" |
- | |
33 | #include "drm.h" |
32 | #include <drm/drmP.h> |
34 | #include "i915_drv.h" |
33 | #include "i915_drv.h" |
35 | #include "i915_drm.h" |
34 | #include <drm/i915_drm.h> |
36 | #include "i915_trace.h" |
35 | #include "i915_trace.h" |
Line 37... | Line 36... | ||
37 | #include "intel_drv.h" |
36 | #include "intel_drv.h" |
38 | 37 | ||
Line 52... | Line 51... | ||
52 | if (space < 0) |
51 | if (space < 0) |
53 | space += ring->size; |
52 | space += ring->size; |
54 | return space; |
53 | return space; |
55 | } |
54 | } |
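The hunk above is the tail of ring_space(): free space is head minus tail minus a small guard, wrapped by the ring size when negative. A self-contained sketch of the same rule, assuming the driver's 8-byte guard:

    /* Sketch of the free-space rule: 8 bytes stay unused so a full
     * ring never looks identical to an empty one (head == tail). */
    static int ring_space_of(int head, int tail, int size)
    {
        int space = head - (tail + 8);
        if (space < 0)
            space += size;
        return space;
    }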
Line -... | Line 55... | ||
- | 55 | ||
56 | 56 | static int |
|
- | 57 | gen2_render_ring_flush(struct intel_ring_buffer *ring, |
|
- | 58 | u32 invalidate_domains, |
|
57 | static u32 i915_gem_get_seqno(struct drm_device *dev) |
59 | u32 flush_domains) |
58 | { |
- | |
59 | drm_i915_private_t *dev_priv = dev->dev_private; |
60 | { |
- | 61 | u32 cmd; |
|
Line -... | Line 62... | ||
- | 62 | int ret; |
|
- | 63 | ||
- | 64 | cmd = MI_FLUSH; |
|
- | 65 | if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0) |
|
- | 66 | cmd |= MI_NO_WRITE_FLUSH; |
|
- | 67 | ||
- | 68 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) |
|
60 | u32 seqno; |
69 | cmd |= MI_READ_FLUSH; |
- | 70 | ||
- | 71 | ret = intel_ring_begin(ring, 2); |
|
Line 61... | Line 72... | ||
61 | 72 | if (ret) |
|
62 | seqno = dev_priv->next_seqno; |
73 | return ret; |
63 | 74 | ||
Line 64... | Line 75... | ||
64 | /* reserve 0 for non-seqno */ |
75 | intel_ring_emit(ring, cmd); |
65 | if (++dev_priv->next_seqno == 0) |
76 | intel_ring_emit(ring, MI_NOOP); |
Line 66... | Line 77... | ||
66 | dev_priv->next_seqno = 1; |
77 | intel_ring_advance(ring); |
67 | 78 | ||
68 | return seqno; |
79 | return 0; |
69 | } |
80 | } |
70 | 81 | ||
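The left column above removes i915_gem_get_seqno(), which handed out increasing sequence numbers while reserving 0 to mean "no seqno". A sketch of that allocation logic outside the driver:

    #include <stdint.h>

    /* 0 is reserved as "no seqno"; on 32-bit wraparound skip straight to 1. */
    static uint32_t next_seqno(uint32_t *counter)
    {
        uint32_t seqno = (*counter)++;
        if (*counter == 0)
            *counter = 1;
        return seqno;
    }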
71 | static int |
82 | static int |
72 | render_ring_flush(struct intel_ring_buffer *ring, |
83 | gen4_render_ring_flush(struct intel_ring_buffer *ring, |
Line 104... | Line 115... | ||
104 | * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER |
115 | * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER |
105 | * are flushed at any MI_FLUSH. |
116 | * are flushed at any MI_FLUSH. |
106 | */ |
117 | */ |
Line 107... | Line 118... | ||
107 | 118 | ||
108 | cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; |
119 | cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; |
109 | if ((invalidate_domains|flush_domains) & |
- | |
110 | I915_GEM_DOMAIN_RENDER) |
120 | if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) |
111 | cmd &= ~MI_NO_WRITE_FLUSH; |
- | |
112 | if (INTEL_INFO(dev)->gen < 4) { |
- | |
113 | /* |
- | |
114 | * On the 965, the sampler cache always gets flushed |
- | |
115 | * and this bit is reserved. |
- | |
116 | */ |
- | |
117 | if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) |
- | |
118 | cmd |= MI_READ_FLUSH; |
- | |
119 | } |
121 | cmd &= ~MI_NO_WRITE_FLUSH; |
120 | if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) |
122 | if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) |
Line 121... | Line 123... | ||
121 | cmd |= MI_EXE_FLUSH; |
123 | cmd |= MI_EXE_FLUSH; |
122 | 124 | ||
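Both sides above build the MI_FLUSH command word from the GEM domain masks: a flush or invalidate touching the render domain clears MI_NO_WRITE_FLUSH, and an instruction-domain invalidate sets MI_EXE_FLUSH. The same flag-building pattern as a standalone sketch (the bit values are illustrative, not the real MI_* encodings):

    #include <stdint.h>

    #define DOMAIN_RENDER  (1u << 0)  /* illustrative stand-ins for the */
    #define DOMAIN_INSN    (1u << 1)  /* I915_GEM_DOMAIN_* masks and    */
    #define FLUSH          (1u << 4)  /* the MI_* command bits          */
    #define NO_WRITE_FLUSH (1u << 3)
    #define EXE_FLUSH      (1u << 2)

    static uint32_t build_flush_cmd(uint32_t invalidate, uint32_t flush)
    {
        uint32_t cmd = FLUSH | NO_WRITE_FLUSH;
        if ((invalidate | flush) & DOMAIN_RENDER)
            cmd &= ~NO_WRITE_FLUSH;   /* render writes must reach memory */
        if (invalidate & DOMAIN_INSN)
            cmd |= EXE_FLUSH;
        return cmd;
    }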
Line 216... | Line 218... | ||
216 | struct pipe_control *pc = ring->private; |
218 | struct pipe_control *pc = ring->private; |
217 | u32 scratch_addr = pc->gtt_offset + 128; |
219 | u32 scratch_addr = pc->gtt_offset + 128; |
218 | int ret; |
220 | int ret; |
Line 219... | Line 221... | ||
219 | 221 | ||
220 | /* Force SNB workarounds for PIPE_CONTROL flushes */ |
222 | /* Force SNB workarounds for PIPE_CONTROL flushes */ |
- | 223 | ret = intel_emit_post_sync_nonzero_flush(ring); |
|
- | 224 | if (ret) |
|
Line 221... | Line 225... | ||
221 | intel_emit_post_sync_nonzero_flush(ring); |
225 | return ret; |
222 | 226 | ||
223 | /* Just flush everything. Experiments have shown that reducing the |
227 | /* Just flush everything. Experiments have shown that reducing the |
224 | * number of bits based on the write domains has little performance |
228 | * number of bits based on the write domains has little performance |
- | 229 | * impact. |
|
225 | * impact. |
230 | */ |
- | 231 | if (flush_domains) { |
|
- | 232 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; |
|
- | 233 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; |
|
- | 234 | /* |
|
- | 235 | * Ensure that any following seqno writes only happen |
|
- | 236 | * when the render cache is indeed flushed. |
|
- | 237 | */ |
|
- | 238 | flags |= PIPE_CONTROL_CS_STALL; |
|
- | 239 | } |
|
226 | */ |
240 | if (invalidate_domains) { |
227 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; |
241 | flags |= PIPE_CONTROL_TLB_INVALIDATE; |
- | 242 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; |
|
- | 243 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; |
|
- | 244 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; |
|
- | 245 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; |
|
- | 246 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; |
|
- | 247 | /* |
|
- | 248 | * TLB invalidate requires a post-sync write. |
|
- | 249 | */ |
|
- | 250 | flags |= PIPE_CONTROL_QW_WRITE; |
|
- | 251 | } |
|
- | 252 | ||
- | 253 | ret = intel_ring_begin(ring, 4); |
|
- | 254 | if (ret) |
|
- | 255 | return ret; |
|
- | 256 | ||
- | 257 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); |
|
- | 258 | intel_ring_emit(ring, flags); |
|
- | 259 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); |
|
- | 260 | intel_ring_emit(ring, 0); |
|
- | 261 | intel_ring_advance(ring); |
|
- | 262 | ||
- | 263 | return 0; |
|
- | 264 | } |
|
- | 265 | ||
- | 266 | static int |
|
- | 267 | gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) |
|
- | 268 | { |
|
- | 269 | int ret; |
|
- | 270 | ||
- | 271 | ret = intel_ring_begin(ring, 4); |
|
- | 272 | if (ret) |
|
- | 273 | return ret; |
|
- | 274 | ||
- | 275 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); |
|
- | 276 | intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | |
|
- | 277 | PIPE_CONTROL_STALL_AT_SCOREBOARD); |
|
- | 278 | intel_ring_emit(ring, 0); |
|
- | 279 | intel_ring_emit(ring, 0); |
|
- | 280 | intel_ring_advance(ring); |
|
- | 281 | ||
- | 282 | return 0; |
|
- | 283 | } |
|
- | 284 | ||
- | 285 | static int |
|
- | 286 | gen7_render_ring_flush(struct intel_ring_buffer *ring, |
|
- | 287 | u32 invalidate_domains, u32 flush_domains) |
|
- | 288 | { |
|
- | 289 | u32 flags = 0; |
|
- | 290 | struct pipe_control *pc = ring->private; |
|
- | 291 | u32 scratch_addr = pc->gtt_offset + 128; |
|
- | 292 | int ret; |
|
- | 293 | ||
- | 294 | /* |
|
- | 295 | * Ensure that any following seqno writes only happen when the render |
|
- | 296 | * cache is indeed flushed. |
|
- | 297 | * |
|
- | 298 | * Workaround: 4th PIPE_CONTROL command (except the ones with only |
|
- | 299 | * read-cache invalidate bits set) must have the CS_STALL bit set. We |
|
- | 300 | * don't try to be clever and just set it unconditionally. |
|
- | 301 | */ |
|
- | 302 | flags |= PIPE_CONTROL_CS_STALL; |
|
- | 303 | ||
- | 304 | /* Just flush everything. Experiments have shown that reducing the |
|
- | 305 | * number of bits based on the write domains has little performance |
|
- | 306 | * impact. |
|
- | 307 | */ |
|
228 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; |
308 | if (flush_domains) { |
- | 309 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; |
|
- | 310 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; |
|
- | 311 | } |
|
- | 312 | if (invalidate_domains) { |
|
- | 313 | flags |= PIPE_CONTROL_TLB_INVALIDATE; |
|
229 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; |
314 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; |
230 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; |
315 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; |
231 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; |
316 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; |
- | 317 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; |
|
- | 318 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; |
|
- | 319 | /* |
|
- | 320 | * TLB invalidate requires a post-sync write. |
|
Line -... | Line 321... | ||
- | 321 | */ |
|
- | 322 | flags |= PIPE_CONTROL_QW_WRITE; |
|
- | 323 | ||
- | 324 | /* Workaround: we must issue a pipe_control with CS-stall bit |
|
- | 325 | * set before a pipe_control command that has the state cache |
|
- | 326 | * invalidate bit set. */ |
|
232 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; |
327 | gen7_render_ring_cs_stall_wa(ring); |
233 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; |
328 | } |
234 | 329 | ||
Line 235... | Line 330... | ||
235 | ret = intel_ring_begin(ring, 6); |
330 | ret = intel_ring_begin(ring, 4); |
236 | if (ret) |
331 | if (ret) |
237 | return ret; |
332 | return ret; |
238 | - | ||
239 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); |
- | |
240 | intel_ring_emit(ring, flags); |
333 | |
241 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); |
334 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); |
Line 242... | Line 335... | ||
242 | intel_ring_emit(ring, 0); /* lower dword */ |
335 | intel_ring_emit(ring, flags); |
243 | intel_ring_emit(ring, 0); /* upper dword */ |
336 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); |
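On both sides the flush ends by queueing a PIPE_CONTROL packet into the ring one dword at a time (the old code used the 5-dword variant with lower/upper post-sync dwords, the new one the 4-dword form). A sketch of that emit pattern with the tail kept in bytes, as this driver does (simplified: no wrap or free-space checks):

    #include <stdint.h>

    static void ring_emit(uint32_t *ring, uint32_t *tail_bytes, uint32_t dword)
    {
        ring[*tail_bytes / 4] = dword;  /* ring is dword-addressed */
        *tail_bytes += 4;
    }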
Line 263... | Line 356... | ||
263 | return I915_READ(acthd_reg); |
356 | return I915_READ(acthd_reg); |
264 | } |
357 | } |
Line 265... | Line 358... | ||
265 | 358 | ||
266 | static int init_ring_common(struct intel_ring_buffer *ring) |
359 | static int init_ring_common(struct intel_ring_buffer *ring) |
- | 360 | { |
|
267 | { |
361 | struct drm_device *dev = ring->dev; |
268 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
362 | drm_i915_private_t *dev_priv = dev->dev_private; |
- | 363 | struct drm_i915_gem_object *obj = ring->obj; |
|
269 | struct drm_i915_gem_object *obj = ring->obj; |
364 | int ret = 0; |
Line -... | Line 365... | ||
- | 365 | u32 head; |
|
- | 366 | ||
- | 367 | if (HAS_FORCE_WAKE(dev)) |
|
270 | u32 head; |
368 | gen6_gt_force_wake_get(dev_priv); |
271 | 369 | ||
272 | /* Stop the ring if it's running. */ |
370 | /* Stop the ring if it's running. */ |
273 | I915_WRITE_CTL(ring, 0); |
371 | I915_WRITE_CTL(ring, 0); |
Line 274... | Line -... | ||
274 | I915_WRITE_HEAD(ring, 0); |
- | |
275 | ring->write_tail(ring, 0); |
- | |
276 | 372 | I915_WRITE_HEAD(ring, 0); |
|
Line 277... | Line 373... | ||
277 | /* Initialize the ring. */ |
373 | ring->write_tail(ring, 0); |
278 | I915_WRITE_START(ring, obj->gtt_offset); |
374 | |
279 | head = I915_READ_HEAD(ring) & HEAD_ADDR; |
375 | head = I915_READ_HEAD(ring) & HEAD_ADDR; |
Line 299... | Line 395... | ||
299 | I915_READ_TAIL(ring), |
395 | I915_READ_TAIL(ring), |
300 | I915_READ_START(ring)); |
396 | I915_READ_START(ring)); |
301 | } |
397 | } |
302 | } |
398 | } |
Line -... | Line 399... | ||
- | 399 | ||
- | 400 | /* Initialize the ring. This must happen _after_ we've cleared the ring |
|
- | 401 | * registers with the above sequence (the readback of the HEAD registers |
|
- | 402 | * also enforces ordering), otherwise the hw might lose the new ring |
|
- | 403 | * register values. */ |
|
303 | 404 | I915_WRITE_START(ring, obj->gtt_offset); |
|
304 | I915_WRITE_CTL(ring, |
405 | I915_WRITE_CTL(ring, |
305 | ((ring->size - PAGE_SIZE) & RING_NR_PAGES) |
406 | ((ring->size - PAGE_SIZE) & RING_NR_PAGES) |
Line 306... | Line 407... | ||
306 | | RING_REPORT_64K | RING_VALID); |
407 | | RING_VALID); |
307 | 408 | ||
308 | /* If the head is still not zero, the ring is dead */ |
409 | /* If the head is still not zero, the ring is dead */ |
309 | if ((I915_READ_CTL(ring) & RING_VALID) == 0 || |
410 | if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && |
310 | I915_READ_START(ring) != obj->gtt_offset || |
411 | I915_READ_START(ring) == obj->gtt_offset && |
311 | (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { |
412 | (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { |
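The right column replaces three one-shot register reads with a 50 ms poll: the ring only counts as initialized once CTL reports VALID, START matches the object's GTT offset, and HEAD has returned to zero. A minimal poll-with-timeout sketch, assuming a millisecond tick source (ticks_ms() is hypothetical):

    extern unsigned long ticks_ms(void);  /* hypothetical ms tick source */

    /* Returns 0 once cond(arg) holds, -1 if timeout_ms elapses first. */
    static int wait_for_cond(int (*cond)(void *), void *arg, unsigned int timeout_ms)
    {
        unsigned long deadline = ticks_ms() + timeout_ms;
        for (;;) {
            if (cond(arg))
                return 0;
            if (ticks_ms() > deadline)
                return cond(arg) ? 0 : -1;  /* final re-check closes a race */
            /* sleep ~1 ms here to avoid hammering the bus */
        }
    }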
312 | DRM_ERROR("%s initialization failed " |
413 | DRM_ERROR("%s initialization failed " |
313 | "ctl %08x head %08x tail %08x start %08x\n", |
414 | "ctl %08x head %08x tail %08x start %08x\n", |
314 | ring->name, |
415 | ring->name, |
315 | I915_READ_CTL(ring), |
416 | I915_READ_CTL(ring), |
316 | I915_READ_HEAD(ring), |
417 | I915_READ_HEAD(ring), |
317 | I915_READ_TAIL(ring), |
418 | I915_READ_TAIL(ring), |
- | 419 | I915_READ_START(ring)); |
|
318 | I915_READ_START(ring)); |
420 | ret = -EIO; |
Line 319... | Line 421... | ||
319 | return -EIO; |
421 | goto out; |
320 | } |
422 | } |
321 | 423 | ||
- | 424 | ring->head = I915_READ_HEAD(ring); |
|
Line -... | Line 425... | ||
- | 425 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
|
- | 426 | ring->space = ring_space(ring); |
|
- | 427 | ring->last_retired_head = -1; |
|
Line 322... | Line 428... | ||
322 | ring->head = I915_READ_HEAD(ring); |
428 | |
323 | ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
429 | out: |
Line 324... | Line 430... | ||
324 | ring->space = ring_space(ring); |
430 | if (HAS_FORCE_WAKE(dev)) |
325 | 431 | gen6_gt_force_wake_put(dev_priv); |
|
326 | 432 | ||
Line 348... | Line 454... | ||
348 | goto err; |
454 | goto err; |
349 | } |
455 | } |
Line 350... | Line 456... | ||
350 | 456 | ||
Line 351... | Line 457... | ||
351 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); |
457 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); |
352 | 458 | ||
353 | ret = i915_gem_object_pin(obj, 4096, true); |
459 | ret = i915_gem_object_pin(obj, 4096, true, false); |
Line 354... | Line 460... | ||
354 | if (ret) |
460 | if (ret) |
355 | goto err_unref; |
461 | goto err_unref; |
356 | 462 | ||
357 | pc->gtt_offset = obj->gtt_offset; |
463 | pc->gtt_offset = obj->gtt_offset; |
Line 358... | Line 464... | ||
358 | pc->cpu_page = (void*)MapIoMem((addr_t)obj->pages[0], 4096, PG_SW); |
464 | pc->cpu_page = (void*)MapIoMem((addr_t)obj->pages.page[0], 4096, PG_SW); |
359 | if (pc->cpu_page == NULL) |
465 | if (pc->cpu_page == NULL) |
Line 395... | Line 501... | ||
395 | struct drm_device *dev = ring->dev; |
501 | struct drm_device *dev = ring->dev; |
396 | struct drm_i915_private *dev_priv = dev->dev_private; |
502 | struct drm_i915_private *dev_priv = dev->dev_private; |
397 | int ret = init_ring_common(ring); |
503 | int ret = init_ring_common(ring); |
Line 398... | Line 504... | ||
398 | 504 | ||
399 | if (INTEL_INFO(dev)->gen > 3) { |
505 | if (INTEL_INFO(dev)->gen > 3) { |
400 | int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; |
- | |
401 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
- | |
402 | mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; |
- | |
403 | I915_WRITE(MI_MODE, mode); |
506 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); |
404 | if (IS_GEN7(dev)) |
507 | if (IS_GEN7(dev)) |
405 | I915_WRITE(GFX_MODE_GEN7, |
508 | I915_WRITE(GFX_MODE_GEN7, |
406 | GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | |
509 | _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | |
407 | GFX_MODE_ENABLE(GFX_REPLAY_MODE)); |
510 | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); |
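The _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() writes above target self-masking registers: the upper 16 bits of the written value select which bits the write may change, the lower 16 carry the new values, so no read-modify-write cycle is needed. A sketch of the encoding (my helper names, not the kernel macros):

    #include <stdint.h>

    static inline uint32_t masked_enable(uint32_t bit)  { return (bit << 16) | bit; }
    static inline uint32_t masked_disable(uint32_t bit) { return  bit << 16; }
    /* e.g. masked_enable(1u << 6) == 0x00400040: touch only bit 6, set it. */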
Line 408... | Line 511... | ||
408 | } |
511 | } |
409 | 512 | ||
410 | if (INTEL_INFO(dev)->gen >= 5) { |
513 | if (INTEL_INFO(dev)->gen >= 5) { |
411 | ret = init_pipe_control(ring); |
514 | ret = init_pipe_control(ring); |
412 | if (ret) |
515 | if (ret) |
Line 413... | Line 516... | ||
413 | return ret; |
516 | return ret; |
- | 517 | } |
|
- | 518 | ||
- | 519 | if (IS_GEN6(dev)) { |
|
- | 520 | /* From the Sandybridge PRM, volume 1 part 3, page 24: |
|
- | 521 | * "If this bit is set, STCunit will have LRA as replacement |
|
414 | } |
522 | * policy. [...] This bit must be reset. LRA replacement |
415 | 523 | * policy is not supported." |
|
- | 524 | */ |
|
- | 525 | I915_WRITE(CACHE_MODE_0, |
|
- | 526 | _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); |
|
- | 527 | ||
- | 528 | /* This is not explicitly set for GEN6, so read the register. |
|
- | 529 | * see intel_ring_mi_set_context() for why we care. |
|
- | 530 | * TODO: consider explicitly setting the bit for GEN5 |
|
416 | if (INTEL_INFO(dev)->gen >= 6) { |
531 | */ |
Line -... | Line 532... | ||
- | 532 | ring->itlb_before_ctx_switch = |
|
- | 533 | !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS); |
|
- | 534 | } |
|
- | 535 | ||
- | 536 | if (INTEL_INFO(dev)->gen >= 6) |
|
- | 537 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); |
|
417 | I915_WRITE(INSTPM, |
538 | |
418 | INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING); |
539 | if (HAS_L3_GPU_CACHE(dev)) |
Line 419... | Line 540... | ||
419 | } |
540 | I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); |
420 | 541 | ||
Line 464... | Line 585... | ||
464 | return ret; |
585 | return ret; |
Line 465... | Line 586... | ||
465 | 586 | ||
466 | mbox1_reg = ring->signal_mbox[0]; |
587 | mbox1_reg = ring->signal_mbox[0]; |
Line 467... | Line 588... | ||
467 | mbox2_reg = ring->signal_mbox[1]; |
588 | mbox2_reg = ring->signal_mbox[1]; |
Line 468... | Line 589... | ||
468 | 589 | ||
469 | *seqno = i915_gem_get_seqno(ring->dev); |
590 | *seqno = i915_gem_next_request_seqno(ring); |
470 | 591 | ||
471 | update_mboxes(ring, *seqno, mbox1_reg); |
592 | update_mboxes(ring, *seqno, mbox1_reg); |
Line 485... | Line 606... | ||
485 | * @waiter - ring that is waiting |
606 | * @waiter - ring that is waiting |
486 | * @signaller - ring which has, or will signal |
607 | * @signaller - ring which has, or will signal |
487 | * @seqno - seqno which the waiter will block on |
608 | * @seqno - seqno which the waiter will block on |
488 | */ |
609 | */ |
489 | static int |
610 | static int |
490 | intel_ring_sync(struct intel_ring_buffer *waiter, |
611 | gen6_ring_sync(struct intel_ring_buffer *waiter, |
491 | struct intel_ring_buffer *signaller, |
612 | struct intel_ring_buffer *signaller, |
492 | int ring, |
- | |
493 | u32 seqno) |
613 | u32 seqno) |
494 | { |
614 | { |
495 | int ret; |
615 | int ret; |
496 | u32 dw1 = MI_SEMAPHORE_MBOX | |
616 | u32 dw1 = MI_SEMAPHORE_MBOX | |
497 | MI_SEMAPHORE_COMPARE | |
617 | MI_SEMAPHORE_COMPARE | |
498 | MI_SEMAPHORE_REGISTER; |
618 | MI_SEMAPHORE_REGISTER; |
Line -... | Line 619... | ||
- | 619 | ||
- | 620 | /* Throughout all of the GEM code, seqno passed implies our current |
|
- | 621 | * seqno is >= the last seqno executed. However for hardware the |
|
- | 622 | * comparison is strictly greater than. |
|
- | 623 | */ |
|
- | 624 | seqno -= 1; |
|
- | 625 | ||
- | 626 | WARN_ON(signaller->semaphore_register[waiter->id] == |
|
- | 627 | MI_SEMAPHORE_SYNC_INVALID); |
|
499 | 628 | ||
500 | ret = intel_ring_begin(waiter, 4); |
629 | ret = intel_ring_begin(waiter, 4); |
501 | if (ret) |
630 | if (ret) |
Line -... | Line 631... | ||
- | 631 | return ret; |
|
502 | return ret; |
632 | |
503 | 633 | intel_ring_emit(waiter, |
|
504 | intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]); |
634 | dw1 | signaller->semaphore_register[waiter->id]); |
505 | intel_ring_emit(waiter, seqno); |
635 | intel_ring_emit(waiter, seqno); |
506 | intel_ring_emit(waiter, 0); |
636 | intel_ring_emit(waiter, 0); |
Line 507... | Line 637... | ||
507 | intel_ring_emit(waiter, MI_NOOP); |
637 | intel_ring_emit(waiter, MI_NOOP); |
508 | intel_ring_advance(waiter); |
638 | intel_ring_advance(waiter); |
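gen6_ring_sync() programs the semaphore with seqno - 1 because GEM's "seqno passed" means last-executed >= seqno, while the hardware MI_SEMAPHORE_COMPARE releases the waiter only on a strictly greater register value. A worked check of that off-by-one:

    #include <assert.h>
    #include <stdint.h>

    static int hw_sema_released(uint32_t reg, uint32_t operand) { return reg > operand; }

    int main(void)
    {
        uint32_t wait = 42;                       /* GEM: done once >= 42 */
        assert( hw_sema_released(42, wait - 1));  /* program 41: 42 > 41  */
        assert(!hw_sema_released(41, wait - 1));  /* 41 not yet passed    */
        return 0;
    }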
Line 509... | Line -... | ||
509 | - | ||
510 | return 0; |
- | |
511 | } |
- | |
512 | - | ||
513 | /* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */ |
- | |
514 | int |
- | |
515 | render_ring_sync_to(struct intel_ring_buffer *waiter, |
- | |
516 | struct intel_ring_buffer *signaller, |
- | |
517 | u32 seqno) |
- | |
518 | { |
- | |
519 | WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID); |
- | |
520 | return intel_ring_sync(waiter, |
- | |
521 | signaller, |
- | |
522 | RCS, |
- | |
523 | seqno); |
- | |
524 | } |
- | |
525 | - | ||
526 | /* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */ |
- | |
527 | int |
- | |
528 | gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter, |
- | |
529 | struct intel_ring_buffer *signaller, |
- | |
530 | u32 seqno) |
- | |
531 | { |
- | |
532 | WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID); |
- | |
533 | return intel_ring_sync(waiter, |
- | |
534 | signaller, |
- | |
535 | VCS, |
- | |
536 | seqno); |
- | |
537 | } |
- | |
538 | - | ||
539 | /* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */ |
- | |
540 | int |
- | |
541 | gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter, |
- | |
542 | struct intel_ring_buffer *signaller, |
- | |
543 | u32 seqno) |
- | |
544 | { |
- | |
545 | WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID); |
- | |
546 | return intel_ring_sync(waiter, |
- | |
547 | signaller, |
- | |
548 | BCS, |
- | |
549 | seqno); |
- | |
550 | } |
639 | |
551 | 640 | return 0; |
|
552 | 641 | } |
|
553 | 642 | ||
554 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ |
643 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ |
Line 562... | Line 651... | ||
562 | 651 | ||
563 | static int |
652 | static int |
564 | pc_render_add_request(struct intel_ring_buffer *ring, |
653 | pc_render_add_request(struct intel_ring_buffer *ring, |
565 | u32 *result) |
654 | u32 *result) |
566 | { |
- | |
567 | struct drm_device *dev = ring->dev; |
655 | { |
568 | u32 seqno = i915_gem_get_seqno(dev); |
656 | u32 seqno = i915_gem_next_request_seqno(ring); |
569 | struct pipe_control *pc = ring->private; |
657 | struct pipe_control *pc = ring->private; |
570 | u32 scratch_addr = pc->gtt_offset + 128; |
658 | u32 scratch_addr = pc->gtt_offset + 128; |
Line 571... | Line 659... | ||
571 | int ret; |
659 | int ret; |
Line 597... | Line 685... | ||
597 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
685 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
598 | scratch_addr += 128; |
686 | scratch_addr += 128; |
599 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
687 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
600 | scratch_addr += 128; |
688 | scratch_addr += 128; |
601 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
689 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
- | 690 | ||
602 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | |
691 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | |
603 | PIPE_CONTROL_WRITE_FLUSH | |
692 | PIPE_CONTROL_WRITE_FLUSH | |
604 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | |
693 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | |
605 | PIPE_CONTROL_NOTIFY); |
694 | PIPE_CONTROL_NOTIFY); |
606 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
695 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
Line 610... | Line 699... | ||
610 | 699 | ||
611 | *result = seqno; |
700 | *result = seqno; |
612 | return 0; |
701 | return 0; |
Line 613... | Line -... | ||
613 | } |
- | |
614 | - | ||
615 | static int |
- | |
616 | render_ring_add_request(struct intel_ring_buffer *ring, |
- | |
617 | u32 *result) |
- | |
618 | { |
- | |
619 | struct drm_device *dev = ring->dev; |
- | |
620 | u32 seqno = i915_gem_get_seqno(dev); |
- | |
621 | int ret; |
- | |
622 | - | ||
623 | ret = intel_ring_begin(ring, 4); |
- | |
624 | if (ret) |
- | |
625 | return ret; |
- | |
626 | - | ||
627 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
- | |
628 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
- | |
629 | intel_ring_emit(ring, seqno); |
- | |
630 | intel_ring_emit(ring, MI_USER_INTERRUPT); |
- | |
631 | intel_ring_advance(ring); |
- | |
632 | - | ||
633 | *result = seqno; |
- | |
634 | return 0; |
- | |
635 | } |
702 | } |
636 | 703 | ||
637 | static u32 |
704 | static u32 |
638 | gen6_ring_get_seqno(struct intel_ring_buffer *ring) |
- | |
639 | { |
- | |
640 | struct drm_device *dev = ring->dev; |
705 | gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) |
641 | 706 | { |
|
642 | /* Workaround to force correct ordering between irq and seqno writes on |
707 | /* Workaround to force correct ordering between irq and seqno writes on |
643 | * ivb (and maybe also on snb) by reading from a CS register (like |
708 | * ivb (and maybe also on snb) by reading from a CS register (like |
644 | * ACTHD) before reading the status page. */ |
709 | * ACTHD) before reading the status page. */ |
645 | if (IS_GEN7(dev)) |
710 | if (!lazy_coherency) |
646 | intel_ring_get_active_head(ring); |
711 | intel_ring_get_active_head(ring); |
Line 647... | Line 712... | ||
647 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
712 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
648 | } |
713 | } |
649 | 714 | ||
650 | static u32 |
715 | static u32 |
651 | ring_get_seqno(struct intel_ring_buffer *ring) |
716 | ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) |
Line 652... | Line 717... | ||
652 | { |
717 | { |
653 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
718 | return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
654 | } |
719 | } |
655 | 720 | ||
656 | static u32 |
721 | static u32 |
657 | pc_render_get_seqno(struct intel_ring_buffer *ring) |
722 | pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) |
Line 658... | Line 723... | ||
658 | { |
723 | { |
659 | struct pipe_control *pc = ring->private; |
724 | struct pipe_control *pc = ring->private; |
660 | return pc->cpu_page[0]; |
725 | return pc->cpu_page[0]; |
- | 726 | } |
|
- | 727 | ||
- | 728 | static bool |
|
- | 729 | gen5_ring_get_irq(struct intel_ring_buffer *ring) |
|
- | 730 | { |
|
- | 731 | struct drm_device *dev = ring->dev; |
|
- | 732 | drm_i915_private_t *dev_priv = dev->dev_private; |
|
- | 733 | unsigned long flags; |
|
- | 734 | ||
661 | } |
735 | if (!dev->irq_enabled) |
662 | 736 | return false; |
|
663 | static void |
737 | |
664 | ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask) |
738 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
- | 739 | if (ring->irq_refcount++ == 0) { |
|
- | 740 | dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; |
|
- | 741 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
|
- | 742 | POSTING_READ(GTIMR); |
|
Line 665... | Line 743... | ||
665 | { |
743 | } |
666 | dev_priv->gt_irq_mask &= ~mask; |
744 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
667 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
745 | |
- | 746 | return true; |
|
- | 747 | } |
|
- | 748 | ||
- | 749 | static void |
|
- | 750 | gen5_ring_put_irq(struct intel_ring_buffer *ring) |
|
- | 751 | { |
|
668 | POSTING_READ(GTIMR); |
752 | struct drm_device *dev = ring->dev; |
669 | } |
753 | drm_i915_private_t *dev_priv = dev->dev_private; |
670 | 754 | unsigned long flags; |
|
671 | static void |
755 | |
- | 756 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
|
- | 757 | if (--ring->irq_refcount == 0) { |
|
Line 672... | Line 758... | ||
672 | ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask) |
758 | dev_priv->gt_irq_mask |= ring->irq_enable_mask; |
673 | { |
759 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
674 | dev_priv->gt_irq_mask |= mask; |
760 | POSTING_READ(GTIMR); |
- | 761 | } |
|
- | 762 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
|
- | 763 | } |
|
- | 764 | ||
- | 765 | static bool |
|
- | 766 | i9xx_ring_get_irq(struct intel_ring_buffer *ring) |
|
- | 767 | { |
|
- | 768 | struct drm_device *dev = ring->dev; |
|
- | 769 | drm_i915_private_t *dev_priv = dev->dev_private; |
|
675 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
770 | unsigned long flags; |
676 | POSTING_READ(GTIMR); |
771 | |
677 | } |
772 | if (!dev->irq_enabled) |
678 | 773 | return false; |
|
- | 774 | ||
- | 775 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
|
- | 776 | if (ring->irq_refcount++ == 0) { |
|
- | 777 | dev_priv->irq_mask &= ~ring->irq_enable_mask; |
|
Line 679... | Line 778... | ||
679 | static void |
778 | I915_WRITE(IMR, dev_priv->irq_mask); |
680 | i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) |
779 | POSTING_READ(IMR); |
681 | { |
780 | } |
- | 781 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
|
- | 782 | ||
- | 783 | return true; |
|
- | 784 | } |
|
- | 785 | ||
- | 786 | static void |
|
682 | dev_priv->irq_mask &= ~mask; |
787 | i9xx_ring_put_irq(struct intel_ring_buffer *ring) |
683 | I915_WRITE(IMR, dev_priv->irq_mask); |
788 | { |
684 | POSTING_READ(IMR); |
789 | struct drm_device *dev = ring->dev; |
685 | } |
790 | drm_i915_private_t *dev_priv = dev->dev_private; |
- | 791 | unsigned long flags; |
|
- | 792 | ||
Line 686... | Line 793... | ||
686 | 793 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
|
687 | static void |
794 | if (--ring->irq_refcount == 0) { |
688 | i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) |
795 | dev_priv->irq_mask |= ring->irq_enable_mask; |
689 | { |
796 | I915_WRITE(IMR, dev_priv->irq_mask); |
690 | dev_priv->irq_mask |= mask; |
797 | POSTING_READ(IMR); |
- | 798 | } |
|
Line 691... | Line 799... | ||
691 | I915_WRITE(IMR, dev_priv->irq_mask); |
799 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
692 | POSTING_READ(IMR); |
800 | } |
Line 693... | Line 801... | ||
693 | } |
801 | |
694 | 802 | static bool |
|
695 | static bool |
- | |
696 | render_ring_get_irq(struct intel_ring_buffer *ring) |
803 | i8xx_ring_get_irq(struct intel_ring_buffer *ring) |
697 | { |
804 | { |
698 | struct drm_device *dev = ring->dev; |
- | |
699 | drm_i915_private_t *dev_priv = dev->dev_private; |
805 | struct drm_device *dev = ring->dev; |
700 | 806 | drm_i915_private_t *dev_priv = dev->dev_private; |
|
701 | if (!dev->irq_enabled) |
807 | unsigned long flags; |
Line 702... | Line 808... | ||
702 | return false; |
808 | |
703 | 809 | if (!dev->irq_enabled) |
|
Line 704... | Line 810... | ||
704 | spin_lock(&ring->irq_lock); |
810 | return false; |
705 | if (ring->irq_refcount++ == 0) { |
811 | |
706 | if (HAS_PCH_SPLIT(dev)) |
812 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
707 | ironlake_enable_irq(dev_priv, |
813 | if (ring->irq_refcount++ == 0) { |
708 | GT_PIPE_NOTIFY | GT_USER_INTERRUPT); |
814 | dev_priv->irq_mask &= ~ring->irq_enable_mask; |
- | 815 | I915_WRITE16(IMR, dev_priv->irq_mask); |
|
Line 709... | Line 816... | ||
709 | else |
816 | POSTING_READ16(IMR); |
710 | i915_enable_irq(dev_priv, I915_USER_INTERRUPT); |
817 | } |
711 | } |
- | |
712 | spin_unlock(&ring->irq_lock); |
818 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
713 | 819 | ||
714 | return true; |
820 | return true; |
715 | } |
- | |
716 | - | ||
717 | static void |
821 | } |
718 | render_ring_put_irq(struct intel_ring_buffer *ring) |
822 | |
719 | { |
823 | static void |
Line 720... | Line 824... | ||
720 | struct drm_device *dev = ring->dev; |
824 | i8xx_ring_put_irq(struct intel_ring_buffer *ring) |
721 | drm_i915_private_t *dev_priv = dev->dev_private; |
825 | { |
722 | 826 | struct drm_device *dev = ring->dev; |
|
Line 741... | Line 845... | ||
741 | /* The ring status page addresses are no longer next to the rest of |
845 | /* The ring status page addresses are no longer next to the rest of |
742 | * the ring registers as of gen7. |
846 | * the ring registers as of gen7. |
743 | */ |
847 | */ |
744 | if (IS_GEN7(dev)) { |
848 | if (IS_GEN7(dev)) { |
745 | switch (ring->id) { |
849 | switch (ring->id) { |
746 | case RING_RENDER: |
850 | case RCS: |
747 | mmio = RENDER_HWS_PGA_GEN7; |
851 | mmio = RENDER_HWS_PGA_GEN7; |
748 | break; |
852 | break; |
749 | case RING_BLT: |
853 | case BCS: |
750 | mmio = BLT_HWS_PGA_GEN7; |
854 | mmio = BLT_HWS_PGA_GEN7; |
751 | break; |
855 | break; |
752 | case RING_BSD: |
856 | case VCS: |
753 | mmio = BSD_HWS_PGA_GEN7; |
857 | mmio = BSD_HWS_PGA_GEN7; |
754 | break; |
858 | break; |
755 | } |
859 | } |
756 | } else if (IS_GEN6(ring->dev)) { |
860 | } else if (IS_GEN6(ring->dev)) { |
757 | mmio = RING_HWS_PGA_GEN6(ring->mmio_base); |
861 | mmio = RING_HWS_PGA_GEN6(ring->mmio_base); |
Line 779... | Line 883... | ||
779 | intel_ring_advance(ring); |
883 | intel_ring_advance(ring); |
780 | return 0; |
884 | return 0; |
781 | } |
885 | } |
Line 782... | Line 886... | ||
782 | 886 | ||
783 | static int |
887 | static int |
784 | ring_add_request(struct intel_ring_buffer *ring, |
888 | i9xx_add_request(struct intel_ring_buffer *ring, |
785 | u32 *result) |
889 | u32 *result) |
786 | { |
890 | { |
787 | u32 seqno; |
891 | u32 seqno; |
Line 788... | Line 892... | ||
788 | int ret; |
892 | int ret; |
789 | 893 | ||
790 | ret = intel_ring_begin(ring, 4); |
894 | ret = intel_ring_begin(ring, 4); |
Line 791... | Line 895... | ||
791 | if (ret) |
895 | if (ret) |
Line 792... | Line 896... | ||
792 | return ret; |
896 | return ret; |
793 | 897 | ||
794 | seqno = i915_gem_get_seqno(ring->dev); |
898 | seqno = i915_gem_next_request_seqno(ring); |
795 | 899 | ||
Line 802... | Line 906... | ||
802 | *result = seqno; |
906 | *result = seqno; |
803 | return 0; |
907 | return 0; |
804 | } |
908 | } |
Line 805... | Line 909... | ||
805 | 909 | ||
806 | static bool |
910 | static bool |
807 | gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) |
911 | gen6_ring_get_irq(struct intel_ring_buffer *ring) |
808 | { |
912 | { |
809 | struct drm_device *dev = ring->dev; |
913 | struct drm_device *dev = ring->dev; |
- | 914 | drm_i915_private_t *dev_priv = dev->dev_private; |
|
Line 810... | Line 915... | ||
810 | drm_i915_private_t *dev_priv = dev->dev_private; |
915 | unsigned long flags; |
811 | 916 | ||
Line 812... | Line 917... | ||
812 | if (!dev->irq_enabled) |
917 | if (!dev->irq_enabled) |
813 | return false; |
918 | return false; |
814 | 919 | ||
815 | /* It looks like we need to prevent the gt from suspending while waiting |
- | |
816 | * for a notify irq, otherwise irqs seem to get lost on at least the |
920 | /* It looks like we need to prevent the gt from suspending while waiting |
Line 817... | Line 921... | ||
817 | * blt/bsd rings on ivb. */ |
921 | * for an notifiy irq, otherwise irqs seem to get lost on at least the |
818 | if (IS_GEN7(dev)) |
922 | * blt/bsd rings on ivb. */ |
- | 923 | gen6_gt_force_wake_get(dev_priv); |
|
- | 924 | ||
819 | gen6_gt_force_wake_get(dev_priv); |
925 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
- | 926 | if (ring->irq_refcount++ == 0) { |
|
820 | 927 | if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) |
|
- | 928 | I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | |
|
821 | spin_lock(&ring->irq_lock); |
929 | GEN6_RENDER_L3_PARITY_ERROR)); |
- | 930 | else |
|
822 | if (ring->irq_refcount++ == 0) { |
931 | I915_WRITE_IMR(ring, ~ring->irq_enable_mask); |
823 | ring->irq_mask &= ~rflag; |
932 | dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; |
Line 824... | Line 933... | ||
824 | I915_WRITE_IMR(ring, ring->irq_mask); |
933 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
825 | ironlake_enable_irq(dev_priv, gflag); |
934 | POSTING_READ(GTIMR); |
Line 826... | Line 935... | ||
826 | } |
935 | } |
827 | spin_unlock(&ring->irq_lock); |
936 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
828 | 937 | ||
829 | return true; |
938 | return true; |
830 | } |
939 | } |
- | 940 | ||
Line 831... | Line 941... | ||
831 | 941 | static void |
|
832 | static void |
942 | gen6_ring_put_irq(struct intel_ring_buffer *ring) |
833 | gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) |
- | |
834 | { |
943 | { |
835 | struct drm_device *dev = ring->dev; |
- | |
836 | drm_i915_private_t *dev_priv = dev->dev_private; |
- | |
837 | - | ||
838 | spin_lock(&ring->irq_lock); |
- | |
839 | if (--ring->irq_refcount == 0) { |
- | |
840 | ring->irq_mask |= rflag; |
- | |
841 | I915_WRITE_IMR(ring, ring->irq_mask); |
- | |
842 | ironlake_disable_irq(dev_priv, gflag); |
- | |
843 | } |
- | |
844 | spin_unlock(&ring->irq_lock); |
- | |
845 | - | ||
846 | if (IS_GEN7(dev)) |
- | |
847 | gen6_gt_force_wake_put(dev_priv); |
- | |
848 | } |
- | |
849 | - | ||
850 | static bool |
- | |
851 | bsd_ring_get_irq(struct intel_ring_buffer *ring) |
- | |
852 | { |
- | |
853 | struct drm_device *dev = ring->dev; |
- | |
854 | drm_i915_private_t *dev_priv = dev->dev_private; |
- | |
855 | 944 | struct drm_device *dev = ring->dev; |
|
856 | if (!dev->irq_enabled) |
945 | drm_i915_private_t *dev_priv = dev->dev_private; |
- | 946 | unsigned long flags; |
|
857 | return false; |
947 | |
858 | - | ||
859 | spin_lock(&ring->irq_lock); |
948 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
860 | if (ring->irq_refcount++ == 0) { |
- | |
861 | if (IS_G4X(dev)) |
949 | if (--ring->irq_refcount == 0) { |
862 | i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT); |
950 | if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) |
863 | else |
- | |
864 | ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT); |
- | |
865 | } |
- | |
866 | spin_unlock(&ring->irq_lock); |
- | |
867 | 951 | I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); |
|
Line 868... | Line -... | ||
868 | return true; |
- | |
869 | } |
- | |
870 | static void |
- | |
871 | bsd_ring_put_irq(struct intel_ring_buffer *ring) |
- | |
872 | { |
- | |
873 | struct drm_device *dev = ring->dev; |
- | |
874 | drm_i915_private_t *dev_priv = dev->dev_private; |
- | |
875 | 952 | else |
|
876 | spin_lock(&ring->irq_lock); |
953 | I915_WRITE_IMR(ring, ~0); |
Line 877... | Line 954... | ||
877 | if (--ring->irq_refcount == 0) { |
954 | dev_priv->gt_irq_mask |= ring->irq_enable_mask; |
878 | if (IS_G4X(dev)) |
955 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
879 | i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT); |
956 | POSTING_READ(GTIMR); |
880 | else |
957 | } |
Line 881... | Line 958... | ||
881 | ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT); |
958 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
882 | } |
959 | |
883 | spin_unlock(&ring->irq_lock); |
960 | gen6_gt_force_wake_put(dev_priv); |
Line 884... | Line 961... | ||
884 | } |
961 | } |
885 | 962 | ||
- | 963 | static int |
|
886 | static int |
964 | i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) |
887 | ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) |
965 | { |
888 | { |
966 | int ret; |
Line 889... | Line 967... | ||
889 | int ret; |
967 | |
890 | 968 | ret = intel_ring_begin(ring, 2); |
|
Line 891... | Line 969... | ||
891 | ret = intel_ring_begin(ring, 2); |
969 | if (ret) |
892 | if (ret) |
970 | return ret; |
893 | return ret; |
971 | |
894 | 972 | intel_ring_emit(ring, |
|
895 | intel_ring_emit(ring, |
- | |
896 | MI_BATCH_BUFFER_START | (2 << 6) | |
973 | MI_BATCH_BUFFER_START | |
Line 897... | Line -... | ||
897 | MI_BATCH_NON_SECURE_I965); |
- | |
898 | intel_ring_emit(ring, offset); |
974 | MI_BATCH_GTT | |
899 | intel_ring_advance(ring); |
975 | MI_BATCH_NON_SECURE_I965); |
900 | 976 | intel_ring_emit(ring, offset); |
|
Line 901... | Line 977... | ||
901 | return 0; |
977 | intel_ring_advance(ring); |
902 | } |
978 | |
903 | 979 | return 0; |
|
904 | static int |
980 | } |
- | 981 | ||
- | 982 | static int |
|
- | 983 | i830_dispatch_execbuffer(struct intel_ring_buffer *ring, |
|
- | 984 | u32 offset, u32 len) |
|
- | 985 | { |
|
- | 986 | int ret; |
|
- | 987 | ||
- | 988 | ret = intel_ring_begin(ring, 4); |
|
- | 989 | if (ret) |
|
905 | render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, |
990 | return ret; |
- | 991 | ||
906 | u32 offset, u32 len) |
992 | intel_ring_emit(ring, MI_BATCH_BUFFER); |
907 | { |
993 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
908 | struct drm_device *dev = ring->dev; |
994 | intel_ring_emit(ring, offset + len - 8); |
Line 909... | Line -... | ||
909 | int ret; |
- | |
910 | - | ||
911 | if (IS_I830(dev) || IS_845G(dev)) { |
- | |
912 | ret = intel_ring_begin(ring, 4); |
- | |
913 | if (ret) |
- | |
914 | return ret; |
- | |
915 | - | ||
916 | intel_ring_emit(ring, MI_BATCH_BUFFER); |
995 | intel_ring_emit(ring, 0); |
917 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
996 | intel_ring_advance(ring); |
918 | intel_ring_emit(ring, offset + len - 8); |
- | |
919 | intel_ring_emit(ring, 0); |
- | |
920 | } else { |
997 | |
Line 921... | Line 998... | ||
921 | ret = intel_ring_begin(ring, 2); |
998 | return 0; |
922 | if (ret) |
999 | } |
Line 923... | Line 1000... | ||
923 | return ret; |
1000 | |
924 | 1001 | static int |
|
925 | if (INTEL_INFO(dev)->gen >= 4) { |
- | |
926 | intel_ring_emit(ring, |
1002 | i915_dispatch_execbuffer(struct intel_ring_buffer *ring, |
Line 927... | Line 1003... | ||
927 | MI_BATCH_BUFFER_START | (2 << 6) | |
1003 | u32 offset, u32 len) |
928 | MI_BATCH_NON_SECURE_I965); |
1004 | { |
929 | intel_ring_emit(ring, offset); |
1005 | int ret; |
Line 930... | Line 1006... | ||
930 | } else { |
1006 | |
931 | intel_ring_emit(ring, |
1007 | ret = intel_ring_begin(ring, 2); |
932 | MI_BATCH_BUFFER_START | (2 << 6)); |
1008 | if (ret) |
933 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
1009 | return ret; |
934 | } |
- | |
935 | } |
- | |
936 | intel_ring_advance(ring); |
1010 | |
Line 937... | Line 1011... | ||
937 | 1011 | intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); |
|
938 | return 0; |
1012 | intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); |
939 | } |
1013 | intel_ring_advance(ring); |
940 | - | ||
941 | static void cleanup_status_page(struct intel_ring_buffer *ring) |
1014 | |
942 | { |
1015 | return 0; |
Line 943... | Line 1016... | ||
943 | drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1016 | } |
944 | struct drm_i915_gem_object *obj; |
1017 | |
Line 969... | Line 1042... | ||
969 | goto err; |
1042 | goto err; |
970 | } |
1043 | } |
Line 971... | Line 1044... | ||
971 | 1044 | ||
Line 972... | Line 1045... | ||
972 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); |
1045 | i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); |
973 | 1046 | ||
974 | ret = i915_gem_object_pin(obj, 4096, true); |
1047 | ret = i915_gem_object_pin(obj, 4096, true, false); |
975 | if (ret != 0) { |
1048 | if (ret != 0) { |
Line 976... | Line 1049... | ||
976 | goto err_unref; |
1049 | goto err_unref; |
977 | } |
1050 | } |
978 | 1051 | ||
979 | ring->status_page.gfx_addr = obj->gtt_offset; |
1052 | ring->status_page.gfx_addr = obj->gtt_offset; |
980 | ring->status_page.page_addr = (void*)MapIoMem((addr_t)obj->pages[0], 4096, PG_SW); |
1053 | ring->status_page.page_addr = (void*)MapIoMem(obj->pages.page[0],4096,PG_SW); |
981 | if (ring->status_page.page_addr == NULL) { |
1054 | if (ring->status_page.page_addr == NULL) { |
982 | memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
1055 | ret = -ENOMEM; |
983 | goto err_unpin; |
1056 | goto err_unpin; |
Line 997... | Line 1070... | ||
997 | drm_gem_object_unreference(&obj->base); |
1070 | drm_gem_object_unreference(&obj->base); |
998 | err: |
1071 | err: |
999 | return ret; |
1072 | return ret; |
1000 | } |
1073 | } |
Line 1001... | Line 1074... | ||
1001 | 1074 | ||
1002 | int intel_init_ring_buffer(struct drm_device *dev, |
1075 | static int intel_init_ring_buffer(struct drm_device *dev, |
1003 | struct intel_ring_buffer *ring) |
1076 | struct intel_ring_buffer *ring) |
1004 | { |
1077 | { |
- | 1078 | struct drm_i915_gem_object *obj; |
|
1005 | struct drm_i915_gem_object *obj; |
1079 | struct drm_i915_private *dev_priv = dev->dev_private; |
Line 1006... | Line 1080... | ||
1006 | int ret; |
1080 | int ret; |
1007 | 1081 | ||
1008 | ring->dev = dev; |
1082 | ring->dev = dev; |
1009 | INIT_LIST_HEAD(&ring->active_list); |
1083 | INIT_LIST_HEAD(&ring->active_list); |
Line 1010... | Line 1084... | ||
1010 | INIT_LIST_HEAD(&ring->request_list); |
1084 | INIT_LIST_HEAD(&ring->request_list); |
1011 | INIT_LIST_HEAD(&ring->gpu_write_list); |
- | |
1012 | - | ||
Line 1013... | Line 1085... | ||
1013 | init_waitqueue_head(&ring->irq_queue); |
1085 | ring->size = 32 * PAGE_SIZE; |
1014 | spin_lock_init(&ring->irq_lock); |
1086 | |
1015 | ring->irq_mask = ~0; |
1087 | init_waitqueue_head(&ring->irq_queue); |
1016 | 1088 | ||
Line 1027... | Line 1099... | ||
1027 | goto err_hws; |
1099 | goto err_hws; |
1028 | } |
1100 | } |
Line 1029... | Line 1101... | ||
1029 | 1101 | ||
Line 1030... | Line 1102... | ||
1030 | ring->obj = obj; |
1102 | ring->obj = obj; |
1031 | 1103 | ||
1032 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true); |
1104 | ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); |
Line 1033... | Line -... | ||
1033 | if (ret) |
- | |
1034 | goto err_unref; |
1105 | if (ret) |
1035 | 1106 | goto err_unref; |
|
1036 | ring->map.size = ring->size; |
- | |
1037 | ring->map.offset = get_bus_addr() + obj->gtt_offset; |
1107 | |
1038 | ring->map.type = 0; |
- | |
1039 | ring->map.flags = 0; |
- | |
1040 | ring->map.mtrr = 0; |
- | |
1041 | - | ||
Line -... | Line 1108... | ||
- | 1108 | ret = i915_gem_object_set_to_gtt_domain(obj, true); |
|
- | 1109 | if (ret) |
|
- | 1110 | goto err_unpin; |
|
1042 | // drm_core_ioremap_wc(&ring->map, dev); |
1111 | |
1043 | 1112 | ring->virtual_start = |
|
1044 | ring->map.handle = ioremap(ring->map.offset, ring->map.size); |
1113 | ioremap(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, |
1045 | 1114 | ring->size); |
|
1046 | if (ring->map.handle == NULL) { |
1115 | if (ring->virtual_start == NULL) { |
Line 1047... | Line -... | ||
1047 | DRM_ERROR("Failed to map ringbuffer.\n"); |
- | |
1048 | ret = -EINVAL; |
1116 | DRM_ERROR("Failed to map ringbuffer.\n"); |
1049 | goto err_unpin; |
1117 | ret = -EINVAL; |
1050 | } |
1118 | goto err_unpin; |
Line 1051... | Line 1119... | ||
1051 | 1119 | } |
|
1052 | ring->virtual_start = ring->map.handle; |
1120 | |
1053 | ret = ring->init(ring); |
1121 | ret = ring->init(ring); |
1054 | if (ret) |
1122 | if (ret) |
1055 | goto err_unmap; |
1123 | goto err_unmap; |
1056 | 1124 | ||
1057 | /* Workaround an erratum on the i830 which causes a hang if |
1125 | /* Workaround an erratum on the i830 which causes a hang if |
Line 1058... | Line 1126... | ||
1058 | * the TAIL pointer points to within the last 2 cachelines |
1126 | * the TAIL pointer points to within the last 2 cachelines |
Line 1059... | Line 1127... | ||
1059 | * of the buffer. |
1127 | * of the buffer. |
Line 1105... | Line 1173... | ||
1105 | // cleanup_status_page(ring); |
1173 | // cleanup_status_page(ring); |
1106 | } |
1174 | } |
Line 1107... | Line 1175... | ||
1107 | 1175 | ||
1108 | static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) |
1176 | static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) |
1109 | { |
1177 | { |
1110 | unsigned int *virt; |
1178 | uint32_t __iomem *virt; |
Line 1111... | Line 1179... | ||
1111 | int rem = ring->size - ring->tail; |
1179 | int rem = ring->size - ring->tail; |
Line 1112... | Line 1180... | ||
1112 | 1180 | ||
1113 | ENTER(); |
1181 | ENTER(); |
1114 | 1182 | ||
1115 | if (ring->space < rem) { |
1183 | if (ring->space < rem) { |
1116 | int ret = intel_wait_ring_buffer(ring, rem); |
1184 | int ret = intel_wait_ring_buffer(ring, rem); |
Line 1117... | Line 1185... | ||
1117 | if (ret) |
1185 | if (ret) |
1118 | return ret; |
1186 | return ret; |
1119 | } |
1187 | } |
1120 | - | ||
1121 | virt = (unsigned int *)(ring->virtual_start + ring->tail); |
1188 | |
1122 | rem /= 8; |
- | |
Line 1123... | Line 1189... | ||
1123 | while (rem--) { |
1189 | virt = ring->virtual_start + ring->tail; |
1124 | *virt++ = MI_NOOP; |
1190 | rem /= 4; |
Line 1125... | Line 1191... | ||
1125 | *virt++ = MI_NOOP; |
1191 | while (rem--) |
1126 | } |
1192 | iowrite32(MI_NOOP, virt++); |
1127 | 1193 | ||
Line 1128... | Line 1194... | ||
1128 | ring->tail = 0; |
1194 | ring->tail = 0; |
1129 | ring->space = ring_space(ring); |
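intel_wrap_ring_buffer() pads the space between the current tail and the end of the ring with MI_NOOP dwords (now written via iowrite32, since the ring is an __iomem mapping) and then restarts at offset 0, so the hardware never executes stale garbage. The wrap logic as a standalone sketch:

    #include <stdint.h>

    /* Fill [tail, size) with no-op dwords, then wrap the byte tail to 0. */
    static void wrap_ring(uint32_t *ring, int *tail_bytes, int size, uint32_t noop)
    {
        int rem = (size - *tail_bytes) / 4;
        uint32_t *virt = ring + *tail_bytes / 4;
        while (rem--)
            *virt++ = noop;
        *tail_bytes = 0;
    }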
1195 | ring->space = ring_space(ring); |
1130 | - | ||
1131 | LEAVE(); |
- | |
1132 | return 0; |
- | |
1133 | } |
1196 | |
Line -... | Line 1197... | ||
- | 1197 | LEAVE(); |
|
- | 1198 | return 0; |
|
- | 1199 | } |
|
- | 1200 | ||
- | 1201 | static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) |
|
- | 1202 | { |
|
- | 1203 | int ret; |
|
1134 | 1204 | ||
- | 1205 | ret = i915_wait_seqno(ring, seqno); |
|
1135 | int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) |
1206 | if (!ret) |
- | 1207 | i915_gem_retire_requests_ring(ring); |
|
1136 | { |
1208 | |
- | 1209 | return ret; |
|
1137 | struct drm_device *dev = ring->dev; |
1210 | } |
- | 1211 | ||
1138 | struct drm_i915_private *dev_priv = dev->dev_private; |
1212 | static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) |
1139 | unsigned long end; |
1213 | { |
- | 1214 | struct drm_i915_gem_request *request; |
|
1140 | u32 head; |
1215 | u32 seqno = 0; |
1141 | 1216 | int ret; |
|
1142 | /* If the reported head position has wrapped or hasn't advanced, |
1217 | |
1143 | * fallback to the slow and accurate path. |
1218 | i915_gem_retire_requests_ring(ring); |
Line -... | Line 1219... | ||
- | 1219 | ||
- | 1220 | if (ring->last_retired_head != -1) { |
|
- | 1221 | ring->head = ring->last_retired_head; |
|
- | 1222 | ring->last_retired_head = -1; |
|
- | 1223 | ring->space = ring_space(ring); |
|
- | 1224 | if (ring->space >= n) |
|
- | 1225 | return 0; |
|
- | 1226 | } |
|
- | 1227 | ||
- | 1228 | list_for_each_entry(request, &ring->request_list, list) { |
|
- | 1229 | int space; |
|
- | 1230 | ||
- | 1231 | if (request->tail == -1) |
|
- | 1232 | continue; |
|
- | 1233 | ||
- | 1234 | space = request->tail - (ring->tail + 8); |
|
- | 1235 | if (space < 0) |
|
- | 1236 | space += ring->size; |
|
- | 1237 | if (space >= n) { |
|
- | 1238 | seqno = request->seqno; |
|
- | 1239 | break; |
|
- | 1240 | } |
|
- | 1241 | ||
- | 1242 | /* Consume this request in case we need more space than |
|
- | 1243 | * is available and so need to prevent a race between |
|
- | 1244 | * updating last_retired_head and direct reads of |
|
- | 1245 | * I915_RING_HEAD. It also provides a nice sanity check. |
|
- | 1246 | */ |
|
- | 1247 | request->tail = -1; |
|
- | 1248 | } |
|
- | 1249 | ||
- | 1250 | if (seqno == 0) |
|
- | 1251 | return -ENOSPC; |
|
- | 1252 | ||
- | 1253 | ret = intel_ring_wait_seqno(ring, seqno); |
|
- | 1254 | if (ret) |
|
- | 1255 | return ret; |
|
- | 1256 | ||
- | 1257 | if (WARN_ON(ring->last_retired_head == -1)) |
|
- | 1258 | return -ENOSPC; |
|
- | 1259 | ||
- | 1260 | ring->head = ring->last_retired_head; |
|
- | 1261 | ring->last_retired_head = -1; |
|
- | 1262 | ring->space = ring_space(ring); |
|
- | 1263 | if (WARN_ON(ring->space < n)) |
|
- | 1264 | return -ENOSPC; |
|
- | 1265 | ||
- | 1266 | return 0; |
|
- | 1267 | } |
|
- | 1268 | ||
- | 1269 | int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) |
|
- | 1270 | { |
|
- | 1271 | struct drm_device *dev = ring->dev; |
|
- | 1272 | struct drm_i915_private *dev_priv = dev->dev_private; |
|
- | 1273 | unsigned long end; |
|
- | 1274 | int ret; |
|
- | 1275 | ||
- | 1276 | ret = intel_ring_wait_request(ring, n); |
|
- | 1277 | if (ret != -ENOSPC) |
|
Line 1144... | Line -... | ||
1144 | */ |
- | |
1145 | head = intel_read_status_page(ring, 4); |
1278 | return ret; |
1146 | if (head > ring->head) { |
1279 | |
1147 | ring->head = head; |
1280 | |
1148 | ring->space = ring_space(ring); |
1281 | /* With GEM the hangcheck timer should kick us out of the loop, |
1149 | if (ring->space >= n) |
1282 | * leaving it early runs the risk of corrupting GEM state (due |
1150 | return 0; |
1283 | * to running on almost untested codepaths). But on resume |
1151 | } |
1284 | * timers don't work yet, so prevent a complete hang in that |
Line 1152... | Line 1285... | ||
1152 | 1285 | * case by choosing an insanely large timeout. */ |
|
- | 1286 | end = GetTimerTicks() + 60 * HZ; |
|
1153 | 1287 | ||
- | 1288 | do { |
|
1154 | end = jiffies + 3 * HZ; |
1289 | ring->head = I915_READ_HEAD(ring); |
1155 | do { |
1290 | ring->space = ring_space(ring); |
1156 | ring->head = I915_READ_HEAD(ring); |
1291 | if (ring->space >= n) { |
1157 | ring->space = ring_space(ring); |
1292 | trace_i915_ring_wait_end(ring); |
1158 | if (ring->space >= n) { |
1293 | return 0; |
Line 1159... | Line 1294... | ||
1159 | trace_i915_ring_wait_end(ring); |
1294 | } |
1160 | return 0; |
1295 | |
1161 | } |
1296 | msleep(1); |
1162 | 1297 | ||
1163 | msleep(1); |
1298 | ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); |
1164 | if (atomic_read(&dev_priv->mm.wedged)) |
1299 | if (ret) |
Line 1165... | Line 1300... | ||
1165 | return -EAGAIN; |
1300 | return ret; |
- | 1301 | } while (!time_after(GetTimerTicks(), end)); |
|
1166 | } while (!time_after(jiffies, end)); |
1302 | trace_i915_ring_wait_end(ring); |
Line 1167... | Line 1303... | ||
1167 | trace_i915_ring_wait_end(ring); |
1303 | return -EBUSY; |
1168 | return -EBUSY; |
1304 | } |
1169 | } |
1305 | |
1170 | 1306 | int intel_ring_begin(struct intel_ring_buffer *ring, |
|
Line 1194... | Line 1330... | ||
1194 | return 0; |
1330 | return 0; |
1195 | } |
1331 | } |
Line 1196... | Line 1332... | ||
1196 | 1332 | ||
1197 | void intel_ring_advance(struct intel_ring_buffer *ring) |
1333 | void intel_ring_advance(struct intel_ring_buffer *ring) |
- | 1334 | { |
|
- | 1335 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
|
1198 | { |
1336 | |
- | 1337 | ring->tail &= ring->size - 1; |
|
- | 1338 | if (dev_priv->stop_rings & intel_ring_flag(ring)) |
|
1199 | ring->tail &= ring->size - 1; |
1339 | return; |
1200 | ring->write_tail(ring, ring->tail); |
1340 | ring->write_tail(ring, ring->tail); |
Line 1201... | Line -... | ||
1201 | } |
- | |
1202 | - | ||
1203 | static const struct intel_ring_buffer render_ring = { |
- | |
1204 | .name = "render ring", |
- | |
1205 | .id = RING_RENDER, |
- | |
1206 | .mmio_base = RENDER_RING_BASE, |
- | |
1207 | .size = 32 * PAGE_SIZE, |
- | |
1208 | .init = init_render_ring, |
- | |
1209 | .write_tail = ring_write_tail, |
- | |
1210 | .flush = render_ring_flush, |
- | |
1211 | .add_request = render_ring_add_request, |
- | |
1212 | .get_seqno = ring_get_seqno, |
- | |
1213 | .irq_get = render_ring_get_irq, |
- | |
1214 | .irq_put = render_ring_put_irq, |
- | |
1215 | .dispatch_execbuffer = render_ring_dispatch_execbuffer, |
- | |
1216 | // .cleanup = render_ring_cleanup, |
- | |
1217 | .sync_to = render_ring_sync_to, |
- | |
1218 | .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID, |
- | |
1219 | MI_SEMAPHORE_SYNC_RV, |
- | |
1220 | MI_SEMAPHORE_SYNC_RB}, |
- | |
1221 | .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC}, |
- | |
1222 | }; |
- | |
1223 | - | ||
1224 | /* ring buffer for bit-stream decoder */ |
- | |
1225 | - | ||
1226 | static const struct intel_ring_buffer bsd_ring = { |
- | |
1227 | .name = "bsd ring", |
- | |
1228 | .id = RING_BSD, |
- | |
1229 | .mmio_base = BSD_RING_BASE, |
- | |
1230 | .size = 32 * PAGE_SIZE, |
- | |
1231 | .init = init_ring_common, |
- | |
1232 | .write_tail = ring_write_tail, |
- | |
1233 | .flush = bsd_ring_flush, |
- | |
1234 | .add_request = ring_add_request, |
- | |
1235 | .get_seqno = ring_get_seqno, |
- | |
1236 | .irq_get = bsd_ring_get_irq, |
- | |
1237 | .irq_put = bsd_ring_put_irq, |
- | |
1238 | .dispatch_execbuffer = ring_dispatch_execbuffer, |
- | |
Line 1239... | Line 1341... | ||
1239 | }; |
1341 | } |
1240 | 1342 | ||
1241 | 1343 | ||
1242 | 1344 | static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1243 | 1345 |                                      u32 value)
1244 | 1346 | {
1245 | 1347 |         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1246 | 1348 |
1247 | 1349 |         /* Every tail move must follow the sequence below */
-    | 1350 |
-    | 1351 |         /* Disable notification that the ring is IDLE. The GT
-    | 1352 |          * will then assume that it is busy and bring it out of rc6.
-    | 1353 |          */
1248 | 1354 |         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1249 | -    |                    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1250 | -    |                    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
-    | 1355 |                    _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
-    | 1356 |
-    | 1357 |         /* Clear the context id. Here be magic! */
1251 | -    |         I915_WRITE(GEN6_BSD_RNCID, 0x0);
-    | 1358 |         I915_WRITE64(GEN6_BSD_RNCID, 0x0);
1252 | 1359 |
-    | 1360 |         /* Wait for the ring not to be idle, i.e. for it to wake up. */
1253 | 1361 |         if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1254 | -    |                       GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
-    | 1362 |                       GEN6_BSD_SLEEP_INDICATOR) == 0,
1255 | 1363 |                      50))
1256 | -    |                 DRM_ERROR("timed out waiting for IDLE Indicator\n");
-    | 1364 |                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1257 | 1365 |
-    | 1366 |         /* Now that the ring is fully powered up, update the tail */
1258 | 1367 |         I915_WRITE_TAIL(ring, value);
-    | 1368 |         POSTING_READ(RING_TAIL(ring->mmio_base));
-    | 1369 |
-    | 1370 |         /* Let the ring send IDLE messages to the GT again,
-    | 1371 |          * and so let it sleep to conserve power when idle.
-    | 1372 |          */
1259 | -    |         I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
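In the rewritten sequence the explicit MODIFY_MASK | DISABLE pair becomes _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE): GEN6_BSD_SLEEP_PSMI_CONTROL is one of the gen6 "masked" registers, where the high 16 bits of a write select which of the low 16 bits actually change. A sketch of that convention (macro bodies as assumed here, they are not shown in this diff):

        /* Masked-register sketch: bits whose mask half is 0 keep their
         * old value, so no read-modify-write cycle is needed. */
        #define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
        #define _MASKED_BIT_DISABLE(a)  ((a) << 16)

This is also why the closing write that re-enables the ring's IDLE messages can be a blind write rather than a read-modify-write.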
Line 1298... | Line 1411...
1298 | 1411 |         intel_ring_advance(ring);
1299 | 1412 |
1300 | 1413 |         return 0;
1301 | 1414 | }
1302 | -    |
1303 | -    | static bool
1304 | -    | gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1305 | -    | {
1306 | -    |         return gen6_ring_get_irq(ring,
1307 | -    |                                  GT_USER_INTERRUPT,
1308 | -    |                                  GEN6_RENDER_USER_INTERRUPT);
1309 | -    | }
1310 | -    |
1311 | -    | static void
1312 | -    | gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1313 | -    | {
1314 | -    |         return gen6_ring_put_irq(ring,
1315 | -    |                                  GT_USER_INTERRUPT,
1316 | -    |                                  GEN6_RENDER_USER_INTERRUPT);
1317 | -    | }
1318 | -    |
1319 | -    | static bool
1320 | -    | gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1321 | -    | {
1322 | -    |         return gen6_ring_get_irq(ring,
1323 | -    |                                  GT_GEN6_BSD_USER_INTERRUPT,
1324 | -    |                                  GEN6_BSD_USER_INTERRUPT);
1325 | -    | }
1326 | -    |
1327 | -    | static void
1328 | -    | gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1329 | -    | {
1330 | -    |         return gen6_ring_put_irq(ring,
1331 | -    |                                  GT_GEN6_BSD_USER_INTERRUPT,
1332 | -    |                                  GEN6_BSD_USER_INTERRUPT);
1333 | -    | }
1334 | -    |
1335 | -    | /* ring buffer for Video Codec for Gen6+ */
1336 | -    | static const struct intel_ring_buffer gen6_bsd_ring = {
1337 | -    |         .name = "gen6 bsd ring",
1338 | -    |         .id = RING_BSD,
1339 | -    |         .mmio_base = GEN6_BSD_RING_BASE,
1340 | -    |         .size = 32 * PAGE_SIZE,
1341 | -    |         .init = init_ring_common,
1342 | -    |         .write_tail = gen6_bsd_ring_write_tail,
1343 | -    |         .flush = gen6_ring_flush,
1344 | -    |         .add_request = gen6_add_request,
1345 | -    |         .get_seqno = gen6_ring_get_seqno,
1346 | -    |         .irq_get = gen6_bsd_ring_get_irq,
1347 | -    |         .irq_put = gen6_bsd_ring_put_irq,
1348 | -    |         .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1349 | -    |         .sync_to = gen6_bsd_ring_sync_to,
1350 | -    |         .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
1351 | -    |                                MI_SEMAPHORE_SYNC_INVALID,
1352 | -    |                                MI_SEMAPHORE_SYNC_VB},
1353 | -    |         .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
1354 | -    | };
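Every wrapper removed above differed only in the two interrupt-mask constants it passed to the shared helper; the new revision instead stores a per-ring ring->irq_enable_mask at init time (see the intel_init_*_ring_buffer() hunks below) and uses one gen6_ring_get_irq()/gen6_ring_put_irq() pair for all engines. A deliberately simplified sketch of the idea -- the real helper also takes the IRQ spinlock, and its put-side twin reverses the write:

        /* Simplified sketch only: locking omitted; the point is that the
         * ring object carries its own IMR mask instead of needing one
         * wrapper function per engine. */
        static bool gen6_ring_get_irq(struct intel_ring_buffer *ring)
        {
                drm_i915_private_t *dev_priv = ring->dev->dev_private;

                if (ring->irq_refcount++ == 0)
                        I915_WRITE_IMR(ring, ~ring->irq_enable_mask);

                return true;
        }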
1355 | 1415 |
1356 | 1416 | /* Blitter support (SandyBridge+) */
1357 | 1417 |
1358 | -    | static bool
1359 | -    | blt_ring_get_irq(struct intel_ring_buffer *ring)
1360 | -    | {
1361 | -    |         return gen6_ring_get_irq(ring,
1362 | -    |                                  GT_BLT_USER_INTERRUPT,
1363 | -    |                                  GEN6_BLITTER_USER_INTERRUPT);
1364 | -    | }
1365 | -    |
1366 | -    | static void
1367 | -    | blt_ring_put_irq(struct intel_ring_buffer *ring)
1368 | -    | {
1369 | -    |         gen6_ring_put_irq(ring,
1370 | -    |                           GT_BLT_USER_INTERRUPT,
1371 | -    |                           GEN6_BLITTER_USER_INTERRUPT);
1372 | -    | }
1373 | -    |
1374 | -    |
1375 | -    | /* Workaround for some stepping of SNB,
1376 | -    |  * each time when BLT engine ring tail moved,
1377 | -    |  * the first command in the ring to be parsed
1378 | -    |  * should be MI_BATCH_BUFFER_START
1379 | -    |  */
1380 | -    | #define NEED_BLT_WORKAROUND(dev) \
1381 | -    |         (IS_GEN6(dev) && (dev->pdev->revision < 8))
1382 | -    |
1383 | -    | static inline struct drm_i915_gem_object *
1384 | -    | to_blt_workaround(struct intel_ring_buffer *ring)
1385 | -    | {
1386 | -    |         return ring->private;
1387 | -    | }
1388 | -    |
1389 | -    | static int blt_ring_init(struct intel_ring_buffer *ring)
1390 | -    | {
1391 | -    |         if (NEED_BLT_WORKAROUND(ring->dev)) {
1392 | -    |                 struct drm_i915_gem_object *obj;
1393 | -    |                 u32 *ptr;
1394 | -    |                 int ret;
1395 | -    |
1396 | -    |                 obj = i915_gem_alloc_object(ring->dev, 4096);
1397 | -    |                 if (obj == NULL)
1398 | -    |                         return -ENOMEM;
1399 | -    |
1400 | -    |                 ret = i915_gem_object_pin(obj, 4096, true);
1401 | -    |                 if (ret) {
1402 | -    |                         drm_gem_object_unreference(&obj->base);
1403 | -    |                         return ret;
1404 | -    |                 }
1405 | -    |
1406 | -    |                 ptr = (void*)MapIoMem((addr_t)obj->pages[0], 4096, PG_SW);
1407 | -    |                 obj->mapped = ptr;
1408 | -    |
1409 | -    |                 *ptr++ = MI_BATCH_BUFFER_END;
1410 | -    |                 *ptr++ = MI_NOOP;
1411 | -    |
1412 | -    |                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1413 | -    |                 if (ret) {
1414 | -    |                         i915_gem_object_unpin(obj);
1415 | -    |                         drm_gem_object_unreference(&obj->base);
1416 | -    |                         FreeKernelSpace(ptr);
1417 | -    |                         obj->mapped = NULL;
1418 | -    |                         return ret;
1419 | -    |                 }
1420 | -    |                 FreeKernelSpace(ptr);
1421 | -    |                 obj->mapped = NULL;
1422 | -    |
1423 | -    |                 ring->private = obj;
1424 | -    |         }
1425 | -    |
1426 | -    |         return init_ring_common(ring);
1427 | -    | }
1428 | -    |
1429 | -    | static int blt_ring_begin(struct intel_ring_buffer *ring,
1430 | -    |                           int num_dwords)
1431 | -    | {
1432 | -    |         if (ring->private) {
1433 | -    |                 int ret = intel_ring_begin(ring, num_dwords+2);
1434 | -    |                 if (ret)
1435 | -    |                         return ret;
1436 | -    |
1437 | -    |                 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1438 | -    |                 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1439 | -    |
1440 | -    |                 return 0;
1441 | -    |         } else
1442 | -    |                 return intel_ring_begin(ring, 4);
1443 | -    | }
1444 | -    |
1445 | 1418 | static int blt_ring_flush(struct intel_ring_buffer *ring,
1446 | 1419 |                           u32 invalidate, u32 flush)
1447 | 1420 | {
1448 | 1421 |         uint32_t cmd;
1449 | 1422 |         int ret;
1450 | 1423 |
1451 | -    |         ret = blt_ring_begin(ring, 4);
-    | 1424 |         ret = intel_ring_begin(ring, 4);
Line 1461... | Line 1434...
1461 | 1434 |         intel_ring_emit(ring, MI_NOOP);
1462 | 1435 |         intel_ring_advance(ring);
1463 | 1436 |         return 0;
1464 | 1437 | }
1465 | -    |
1466 | -    | static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1467 | -    | {
1468 | -    |         if (!ring->private)
1469 | -    |                 return;
1470 | -    |
1471 | -    |         i915_gem_object_unpin(ring->private);
1472 | -    |         drm_gem_object_unreference(ring->private);
1473 | -    |         ring->private = NULL;
1474 | -    | }
1475 | -    |
1476 | -    | static const struct intel_ring_buffer gen6_blt_ring = {
1477 | -    |         .name = "blt ring",
1478 | -    |         .id = RING_BLT,
1479 | -    |         .mmio_base = BLT_RING_BASE,
1480 | -    |         .size = 32 * PAGE_SIZE,
1481 | -    |         .init = blt_ring_init,
1482 | -    |         .write_tail = ring_write_tail,
1483 | -    |         .flush = blt_ring_flush,
1484 | -    |         .add_request = gen6_add_request,
1485 | -    |         .get_seqno = gen6_ring_get_seqno,
1486 | -    |         .irq_get = blt_ring_get_irq,
1487 | -    |         .irq_put = blt_ring_put_irq,
1488 | -    |         .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1489 | -    | //      .cleanup = blt_ring_cleanup,
1490 | -    |         .sync_to = gen6_blt_ring_sync_to,
1491 | -    |         .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
1492 | -    |                                MI_SEMAPHORE_SYNC_BV,
1493 | -    |                                MI_SEMAPHORE_SYNC_INVALID},
1494 | -    |         .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
1495 | -    | };
1496 | 1438 |
1497 | 1439 | int intel_init_render_ring_buffer(struct drm_device *dev)
1498 | 1440 | {
1499 | 1441 |         drm_i915_private_t *dev_priv = dev->dev_private;
1500 | 1442 |         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
-    | 1443 |
-    | 1444 |         ring->name = "render ring";
-    | 1445 |         ring->id = RCS;
-    | 1446 |         ring->mmio_base = RENDER_RING_BASE;
1501 | 1447 |
1502 | -    |         *ring = render_ring;
1503 | 1448 |         if (INTEL_INFO(dev)->gen >= 6) {
1504 | 1449 |                 ring->add_request = gen6_add_request;
-    | 1450 |                 ring->flush = gen7_render_ring_flush;
-    | 1451 |                 if (INTEL_INFO(dev)->gen == 6)
1505 | 1452 |                         ring->flush = gen6_render_ring_flush;
1506 | -    |                 ring->irq_get = gen6_render_ring_get_irq;
1507 | -    |                 ring->irq_put = gen6_render_ring_put_irq;
-    | 1453 |                 ring->irq_get = gen6_ring_get_irq;
-    | 1454 |                 ring->irq_put = gen6_ring_put_irq;
-    | 1455 |                 ring->irq_enable_mask = GT_USER_INTERRUPT;
1508 | 1456 |                 ring->get_seqno = gen6_ring_get_seqno;
-    | 1457 |                 ring->sync_to = gen6_ring_sync;
-    | 1458 |                 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
-    | 1459 |                 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
-    | 1460 |                 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
-    | 1461 |                 ring->signal_mbox[0] = GEN6_VRSYNC;
-    | 1462 |                 ring->signal_mbox[1] = GEN6_BRSYNC;
1509 | 1463 |         } else if (IS_GEN5(dev)) {
1510 | 1464 |                 ring->add_request = pc_render_add_request;
-    | 1465 |                 ring->flush = gen4_render_ring_flush;
1511 | 1466 |                 ring->get_seqno = pc_render_get_seqno;
-    | 1467 |                 ring->irq_get = gen5_ring_get_irq;
-    | 1468 |                 ring->irq_put = gen5_ring_put_irq;
-    | 1469 |                 ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
-    | 1470 |         } else {
-    | 1471 |                 ring->add_request = i9xx_add_request;
-    | 1472 |                 if (INTEL_INFO(dev)->gen < 4)
-    | 1473 |                         ring->flush = gen2_render_ring_flush;
-    | 1474 |                 else
-    | 1475 |                         ring->flush = gen4_render_ring_flush;
-    | 1476 |                 ring->get_seqno = ring_get_seqno;
-    | 1477 |                 if (IS_GEN2(dev)) {
-    | 1478 |                         ring->irq_get = i8xx_ring_get_irq;
-    | 1479 |                         ring->irq_put = i8xx_ring_put_irq;
-    | 1480 |                 } else {
-    | 1481 |                         ring->irq_get = i9xx_ring_get_irq;
-    | 1482 |                         ring->irq_put = i9xx_ring_put_irq;
-    | 1483 |                 }
-    | 1484 |                 ring->irq_enable_mask = I915_USER_INTERRUPT;
1512 | 1485 |         }
-    | 1486 |         ring->write_tail = ring_write_tail;
-    | 1487 |         if (INTEL_INFO(dev)->gen >= 6)
-    | 1488 |                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-    | 1489 |         else if (INTEL_INFO(dev)->gen >= 4)
-    | 1490 |                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
-    | 1491 |         else if (IS_I830(dev) || IS_845G(dev))
-    | 1492 |                 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
-    | 1493 |         else
-    | 1494 |                 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
-    | 1495 |         ring->init = init_render_ring;
-    | 1496 |         ring->cleanup = render_ring_cleanup;
-    | 1497 |
1513 | 1498 |
1514 | 1499 |         if (!I915_NEED_GFX_HWS(dev)) {
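With the static templates gone, the init function assigns every operation explicitly, so all generation checks happen exactly once, at ring init. After that, common code is expected to dispatch purely through the function pointers; a sketch of the assumed call pattern (illustrative helper, not part of this diff):

        /* Sketch: submission code never tests the GPU generation itself;
         * it just calls through the vtable filled in at init time. */
        static int submit_batch(struct intel_ring_buffer *ring,
                                u32 offset, u32 length)
        {
                return ring->dispatch_execbuffer(ring, offset, length);
        }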
Line 1523... | Line 1508...
1523 | 1508 | int intel_init_bsd_ring_buffer(struct drm_device *dev)
1524 | 1509 | {
1525 | 1510 |         drm_i915_private_t *dev_priv = dev->dev_private;
1526 | 1511 |         struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1527 | 1512 |
-    | 1513 |         ring->name = "bsd ring";
-    | 1514 |         ring->id = VCS;
-    | 1515 |
-    | 1516 |         ring->write_tail = ring_write_tail;
1528 | -    |         if (IS_GEN6(dev) || IS_GEN7(dev))
1529 | -    |                 *ring = gen6_bsd_ring;
1530 | -    |         else
1531 | -    |                 *ring = bsd_ring;
-    | 1517 |         if (IS_GEN6(dev) || IS_GEN7(dev)) {
-    | 1518 |                 ring->mmio_base = GEN6_BSD_RING_BASE;
-    | 1519 |                 /* gen6 bsd needs a special wa for tail updates */
-    | 1520 |                 if (IS_GEN6(dev))
-    | 1521 |                         ring->write_tail = gen6_bsd_ring_write_tail;
-    | 1522 |                 ring->flush = gen6_ring_flush;
-    | 1523 |                 ring->add_request = gen6_add_request;
-    | 1524 |                 ring->get_seqno = gen6_ring_get_seqno;
-    | 1525 |                 ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
-    | 1526 |                 ring->irq_get = gen6_ring_get_irq;
-    | 1527 |                 ring->irq_put = gen6_ring_put_irq;
-    | 1528 |                 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-    | 1529 |                 ring->sync_to = gen6_ring_sync;
-    | 1530 |                 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
-    | 1531 |                 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
-    | 1532 |                 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
-    | 1533 |                 ring->signal_mbox[0] = GEN6_RVSYNC;
-    | 1534 |                 ring->signal_mbox[1] = GEN6_BVSYNC;
-    | 1535 |         } else {
-    | 1536 |                 ring->mmio_base = BSD_RING_BASE;
-    | 1537 |                 ring->flush = bsd_ring_flush;
-    | 1538 |                 ring->add_request = i9xx_add_request;
-    | 1539 |                 ring->get_seqno = ring_get_seqno;
-    | 1540 |                 if (IS_GEN5(dev)) {
-    | 1541 |                         ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-    | 1542 |                         ring->irq_get = gen5_ring_get_irq;
-    | 1543 |                         ring->irq_put = gen5_ring_put_irq;
-    | 1544 |                 } else {
-    | 1545 |                         ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
-    | 1546 |                         ring->irq_get = i9xx_ring_get_irq;
-    | 1547 |                         ring->irq_put = i9xx_ring_put_irq;
-    | 1548 |                 }
-    | 1549 |                 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
-    | 1550 |         }
-    | 1551 |         ring->init = init_ring_common;
1532 | 1552 |
-    | 1553 |
1533 | 1554 |         return intel_init_ring_buffer(dev, ring);
1534 | 1555 | }
1535 | 1556 |
1536 | 1557 | int intel_init_blt_ring_buffer(struct drm_device *dev)
1537 | 1558 | {
1538 | 1559 |         drm_i915_private_t *dev_priv = dev->dev_private;
1539 | 1560 |         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
-    | 1561 |
-    | 1562 |         ring->name = "blitter ring";
-    | 1563 |         ring->id = BCS;
-    | 1564 |
-    | 1565 |         ring->mmio_base = BLT_RING_BASE;
-    | 1566 |         ring->write_tail = ring_write_tail;
-    | 1567 |         ring->flush = blt_ring_flush;
-    | 1568 |         ring->add_request = gen6_add_request;
-    | 1569 |         ring->get_seqno = gen6_ring_get_seqno;
-    | 1570 |         ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
-    | 1571 |         ring->irq_get = gen6_ring_get_irq;
-    | 1572 |         ring->irq_put = gen6_ring_put_irq;
-    | 1573 |         ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-    | 1574 |         ring->sync_to = gen6_ring_sync;
-    | 1575 |         ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
-    | 1576 |         ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
-    | 1577 |         ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
-    | 1578 |         ring->signal_mbox[0] = GEN6_RBSYNC;
-    | 1579 |         ring->signal_mbox[1] = GEN6_VBSYNC;
-    | 1580 |         ring->init = init_ring_common;
-    | 1581 |
-    | 1582 |         return intel_init_ring_buffer(dev, ring);
-    | 1583 | }
-    | 1584 |
-    | 1585 | int
-    | 1586 | intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
-    | 1587 | {
-    | 1588 |         int ret;
-    | 1589 |
-    | 1590 |         if (!ring->gpu_caches_dirty)
-    | 1591 |                 return 0;
-    | 1592 |
-    | 1593 |         ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
-    | 1594 |         if (ret)
-    | 1595 |                 return ret;
-    | 1596 |
-    | 1597 |         trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
-    | 1598 |
-    | 1599 |         ring->gpu_caches_dirty = false;
-    | 1600 |         return 0;
-    | 1601 | }
-    | 1602 |
-    | 1603 | int
-    | 1604 | intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
-    | 1605 | {
-    | 1606 |         uint32_t flush_domains;
-    | 1607 |         int ret;
-    | 1608 |
-    | 1609 |         flush_domains = 0;
-    | 1610 |         if (ring->gpu_caches_dirty)
-    | 1611 |                 flush_domains = I915_GEM_GPU_DOMAINS;
-    | 1612 |
-    | 1613 |         ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
-    | 1614 |         if (ret)
-    | 1615 |                 return ret;
-    | 1616 |
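The two new helpers at the end wrap the per-ring flush entry point with gpu_caches_dirty bookkeeping: flush_all_caches() is a no-op until someone marks the caches dirty, and invalidate_all_caches() folds any pending flush into the same command. A sketch of the assumed usage from a submission path (hypothetical caller, not part of this file):

        /* Hypothetical caller: invalidate before the GPU reads the new
         * batch, mark caches dirty after commands that write, and let
         * intel_ring_flush_all_caches() emit the flush lazily later. */
        static int submit_and_mark_dirty(struct intel_ring_buffer *ring)
        {
                int ret = intel_ring_invalidate_all_caches(ring);
                if (ret)
                        return ret;

                /* ... emit the batch here ... */

                ring->gpu_caches_dirty = true;
                return 0;
        }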