Subversion Repositories Kolibri OS


Diff: Rev 4560 (old) → Rev 5060 (new)
Line 31... Line 31...
31
#include "i915_drv.h"
31
#include "i915_drv.h"
32
#include <drm/i915_drm.h>
32
#include <drm/i915_drm.h>
33
#include "i915_trace.h"
33
#include "i915_trace.h"
34
#include "intel_drv.h"
34
#include "intel_drv.h"
Line -... Line 35...
-
 
35
 
-
 
36
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
-
 
37
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
-
 
38
 * to give some inclination as to some of the magic values used in the various
-
 
39
 * workarounds!
-
 
40
 */
-
 
41
#define CACHELINE_BYTES 64
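The rest of this diff replaces hard-coded scratch offsets with multiples of this macro. A minimal standalone sketch (the EXAMPLE_ name is invented, not part of the driver) of the equivalence being relied on:

#define EXAMPLE_CACHELINE_BYTES 64	/* mirrors CACHELINE_BYTES above */
/* Illustrative only: the magic "+ 128" offsets elsewhere in this file are
 * two 64-byte cachelines, which the new macro makes explicit. */
_Static_assert(2 * EXAMPLE_CACHELINE_BYTES == 128,
	       "two cachelines == the old hard-coded 128-byte scratch offset");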
35
 
42
 
36
static inline int ring_space(struct intel_ring_buffer *ring)
43
static inline int __ring_space(int head, int tail, int size)
37
{
44
{
38
	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
45
	int space = head - (tail + I915_RING_FREE_SPACE);
39
	if (space < 0)
46
	if (space < 0)
40
		space += ring->size;
47
		space += size;
41
	return space;
48
	return space;
Line 42... Line 49...
42
}
49
}
-
 
50
 
-
 
51
static inline int ring_space(struct intel_ringbuffer *ringbuf)
-
 
52
{
-
 
53
	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
-
 
54
}
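A minimal standalone sketch (constants and function names invented for illustration) of the circular-buffer free-space arithmetic that the new __ring_space() helper factors out of ring_space():

#include <assert.h>

#define EXAMPLE_RING_SIZE   4096	/* stand-in ring size in bytes */
#define EXAMPLE_FREE_SPACE  64		/* stand-in for I915_RING_FREE_SPACE */

static int example_ring_space(int head, int tail)
{
	int space = head - (tail + EXAMPLE_FREE_SPACE);
	if (space < 0)
		space += EXAMPLE_RING_SIZE;	/* tail is ahead of head: wrap around */
	return space;
}

int main(void)
{
	/* head ahead of tail: plain subtraction */
	assert(example_ring_space(1024, 512) == 448);
	/* tail ahead of head: the negative result wraps around the ring */
	assert(example_ring_space(256, 3584) == 704);
	return 0;
}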
43
 
55
 
44
void __intel_ring_advance(struct intel_ring_buffer *ring)
56
static bool intel_ring_stopped(struct intel_engine_cs *ring)
-
 
57
{
-
 
58
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
Line -... Line 59...
-
 
59
	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
-
 
60
}
-
 
61
 
45
{
62
void __intel_ring_advance(struct intel_engine_cs *ring)
46
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
63
{
47
 
64
	struct intel_ringbuffer *ringbuf = ring->buffer;
48
	ring->tail &= ring->size - 1;
65
	ringbuf->tail &= ringbuf->size - 1;
49
	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
66
	if (intel_ring_stopped(ring))
Line 50... Line 67...
50
		return;
67
		return;
51
	ring->write_tail(ring, ring->tail);
68
	ring->write_tail(ring, ringbuf->tail);
52
}
69
}
53
 
70
 
54
static int
71
static int
55
gen2_render_ring_flush(struct intel_ring_buffer *ring,
72
gen2_render_ring_flush(struct intel_engine_cs *ring,
56
		       u32	invalidate_domains,
73
		       u32	invalidate_domains,
Line 76... Line 93...
76
 
93
 
77
	return 0;
94
	return 0;
Line 78... Line 95...
78
}
95
}
79
 
96
 
80
static int
97
static int
81
gen4_render_ring_flush(struct intel_ring_buffer *ring,
98
gen4_render_ring_flush(struct intel_engine_cs *ring,
82
		  u32	invalidate_domains,
99
		  u32	invalidate_domains,
83
		  u32	flush_domains)
100
		  u32	flush_domains)
84
{
101
{
Line 171... Line 188...
171
 * Post-sync nonzero is what triggered this second workaround, so we
188
 * Post-sync nonzero is what triggered this second workaround, so we
172
 * can't use that one either.  Notify enable is IRQs, which aren't
189
 * can't use that one either.  Notify enable is IRQs, which aren't
173
 * really our business.  That leaves only stall at scoreboard.
190
 * really our business.  That leaves only stall at scoreboard.
174
 */
191
 */
175
static int
192
static int
176
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
193
intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
177
{
194
{
178
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
195
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
179
	int ret;
196
	int ret;
Line 180... Line 197...
180
 
197
 
181
 
198
 
Line 206... Line 223...
206
 
223
 
207
	return 0;
224
	return 0;
Line 208... Line 225...
208
}
225
}
209
 
226
 
210
static int
227
static int
211
gen6_render_ring_flush(struct intel_ring_buffer *ring,
228
gen6_render_ring_flush(struct intel_engine_cs *ring,
212
                         u32 invalidate_domains, u32 flush_domains)
229
                         u32 invalidate_domains, u32 flush_domains)
213
{
230
{
214
	u32 flags = 0;
231
	u32 flags = 0;
Line 215... Line 232...
215
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
232
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
216
	int ret;
233
	int ret;
217
 
234
 
Line 258... Line 275...
258
 
275
 
259
	return 0;
276
	return 0;
Line 260... Line 277...
260
}
277
}
261
 
278
 
262
static int
279
static int
263
gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
280
gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
Line 264... Line 281...
264
{
281
{
265
	int ret;
282
	int ret;
Line 276... Line 293...
276
	intel_ring_advance(ring);
293
	intel_ring_advance(ring);
Line 277... Line 294...
277
 
294
 
278
	return 0;
295
	return 0;
Line 279... Line 296...
279
}
296
}
280
 
297
 
281
static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
298
static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
Line 282... Line 299...
282
{
299
{
283
	int ret;
300
	int ret;
Line 300... Line 317...
300
	ring->fbc_dirty = false;
317
	ring->fbc_dirty = false;
301
	return 0;
318
	return 0;
302
}
319
}
Line 303... Line 320...
303
 
320
 
304
static int
321
static int
305
gen7_render_ring_flush(struct intel_ring_buffer *ring,
322
gen7_render_ring_flush(struct intel_engine_cs *ring,
306
		       u32 invalidate_domains, u32 flush_domains)
323
		       u32 invalidate_domains, u32 flush_domains)
307
{
324
{
308
	u32 flags = 0;
325
	u32 flags = 0;
309
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
326
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
Line 310... Line 327...
310
	int ret;
327
	int ret;
311
 
328
 
312
	/*
329
	/*
Line 361... Line 378...
361
 
378
 
362
	return 0;
379
	return 0;
Line 363... Line 380...
363
}
380
}
-
 
381
 
-
 
382
static int
-
 
383
gen8_emit_pipe_control(struct intel_engine_cs *ring,
-
 
384
		       u32 flags, u32 scratch_addr)
-
 
385
{
-
 
386
	int ret;
-
 
387
 
-
 
388
	ret = intel_ring_begin(ring, 6);
-
 
389
	if (ret)
-
 
390
		return ret;
-
 
391
 
-
 
392
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-
 
393
	intel_ring_emit(ring, flags);
-
 
394
	intel_ring_emit(ring, scratch_addr);
-
 
395
	intel_ring_emit(ring, 0);
-
 
396
	intel_ring_emit(ring, 0);
-
 
397
	intel_ring_emit(ring, 0);
-
 
398
	intel_ring_advance(ring);
-
 
399
 
-
 
400
	return 0;
-
 
401
}
364
 
402
 
365
static int
403
static int
366
gen8_render_ring_flush(struct intel_ring_buffer *ring,
404
gen8_render_ring_flush(struct intel_engine_cs *ring,
367
		       u32 invalidate_domains, u32 flush_domains)
405
		       u32 invalidate_domains, u32 flush_domains)
368
{
406
{
369
	u32 flags = 0;
407
	u32 flags = 0;
Line 370... Line 408...
370
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
408
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
Line 371... Line 409...
371
	int ret;
409
	int ret;
Line 383... Line 421...
383
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
421
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
384
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
422
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
385
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
423
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
386
		flags |= PIPE_CONTROL_QW_WRITE;
424
		flags |= PIPE_CONTROL_QW_WRITE;
387
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
425
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
388
	}
-
 
Line -... Line 426...
-
 
426
 
389
 
427
		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
-
 
428
		ret = gen8_emit_pipe_control(ring,
-
 
429
					     PIPE_CONTROL_CS_STALL |
-
 
430
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
390
	ret = intel_ring_begin(ring, 6);
431
					     0);
391
	if (ret)
432
	if (ret)
-
 
433
		return ret;
Line 392... Line -...
392
		return ret;
-
 
393
 
-
 
394
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
434
	}
395
	intel_ring_emit(ring, flags);
-
 
396
	intel_ring_emit(ring, scratch_addr);
-
 
397
	intel_ring_emit(ring, 0);
-
 
398
	intel_ring_emit(ring, 0);
-
 
399
	intel_ring_emit(ring, 0);
-
 
400
	intel_ring_advance(ring);
-
 
401
 
-
 
402
	return 0;
435
 
Line 403... Line 436...
403
 
436
	return gen8_emit_pipe_control(ring, flags, scratch_addr);
404
}
437
}
405
 
438
 
406
static void ring_write_tail(struct intel_ring_buffer *ring,
439
static void ring_write_tail(struct intel_engine_cs *ring,
407
			    u32 value)
440
			    u32 value)
408
{
441
{
Line 409... Line 442...
409
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
442
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
410
	I915_WRITE_TAIL(ring, value);
443
	I915_WRITE_TAIL(ring, value);
411
}
444
}
412
 
445
 
413
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
-
 
Line -... Line 446...
-
 
446
u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
-
 
447
{
-
 
448
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
 
449
	u64 acthd;
-
 
450
 
-
 
451
	if (INTEL_INFO(ring->dev)->gen >= 8)
414
{
452
		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
-
 
453
					 RING_ACTHD_UDW(ring->mmio_base));
-
 
454
	else if (INTEL_INFO(ring->dev)->gen >= 4)
415
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
455
		acthd = I915_READ(RING_ACTHD(ring->mmio_base));
Line 416... Line 456...
416
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
456
	else
417
			RING_ACTHD(ring->mmio_base) : ACTHD;
457
		acthd = I915_READ(ACTHD);
418
 
458
 
419
	return I915_READ(acthd_reg);
459
	return acthd;
Line 420... Line 460...
420
}
460
}
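The gen8 path above reads ACTHD as a 64-bit value split across two 32-bit registers. A generic sketch of how such a split read is usually made tear-free (this is an illustration, not the driver's I915_READ64_2x32 macro; rd() stands in for a 32-bit MMIO read):

#include <stdint.h>

static uint64_t example_read64_2x32(uint32_t (*rd)(uint32_t reg),
				    uint32_t lower_reg, uint32_t upper_reg)
{
	uint32_t upper, lower, tmp;

	do {
		upper = rd(upper_reg);
		lower = rd(lower_reg);
		tmp   = rd(upper_reg);
	} while (upper != tmp);		/* upper half changed mid-read: retry */

	return ((uint64_t)upper << 32) | lower;
}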
421
 
461
 
422
static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
462
static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
423
{
463
{
424
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
464
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
Line 425... Line 465...
425
	u32 addr;
465
	u32 addr;
426
 
466
 
427
	addr = dev_priv->status_page_dmah->busaddr;
-
 
428
	if (INTEL_INFO(ring->dev)->gen >= 4)
467
	addr = dev_priv->status_page_dmah->busaddr;
429
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
-
 
430
	I915_WRITE(HWS_PGA, addr);
-
 
431
}
-
 
Line -... Line 468...
-
 
468
	if (INTEL_INFO(ring->dev)->gen >= 4)
432
 
469
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
433
static int init_ring_common(struct intel_ring_buffer *ring)
-
 
434
{
470
	I915_WRITE(HWS_PGA, addr);
435
	struct drm_device *dev = ring->dev;
471
}
-
 
472
 
436
	drm_i915_private_t *dev_priv = dev->dev_private;
473
static bool stop_ring(struct intel_engine_cs *ring)
437
	struct drm_i915_gem_object *obj = ring->obj;
-
 
-
 
474
{
Line 438... Line -...
438
	int ret = 0;
-
 
439
	u32 head;
475
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
440
 
476
 
441
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
477
	if (!IS_GEN2(ring->dev)) {
Line -... Line 478...
-
 
478
		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
-
 
479
		if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
-
 
480
			DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
-
 
481
			return false;
-
 
482
		}
442
 
483
	}
-
 
484
 
-
 
485
	I915_WRITE_CTL(ring, 0);
-
 
486
	I915_WRITE_HEAD(ring, 0);
-
 
487
	ring->write_tail(ring, 0);
-
 
488
 
-
 
489
	if (!IS_GEN2(ring->dev)) {
-
 
490
		(void)I915_READ_CTL(ring);
-
 
491
		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
-
 
492
	}
-
 
493
 
-
 
494
	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
Line -... Line 495...
-
 
495
}
443
	if (I915_NEED_GFX_HWS(dev))
496
 
444
		intel_ring_setup_status_page(ring);
-
 
445
	else
497
static int init_ring_common(struct intel_engine_cs *ring)
446
		ring_setup_phys_status_page(ring);
498
{
447
 
499
	struct drm_device *dev = ring->dev;
448
	/* Stop the ring if it's running. */
500
	struct drm_i915_private *dev_priv = dev->dev_private;
449
	I915_WRITE_CTL(ring, 0);
501
	struct intel_ringbuffer *ringbuf = ring->buffer;
450
	I915_WRITE_HEAD(ring, 0);
502
	struct drm_i915_gem_object *obj = ringbuf->obj;
451
	ring->write_tail(ring, 0);
503
	int ret = 0;
Line 452... Line 504...
452
 
504
 
453
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
-
 
454
 
-
 
455
	/* G45 ring initialization fails to reset head to zero */
505
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
456
	if (head != 0) {
506
 
457
		DRM_DEBUG_KMS("%s head not reset to zero "
507
	if (!stop_ring(ring)) {
458
			      "ctl %08x head %08x tail %08x start %08x\n",
508
		/* G45 ring initialization often fails to reset head to zero */
459
			      ring->name,
509
		DRM_DEBUG_KMS("%s head not reset to zero "
460
			      I915_READ_CTL(ring),
510
			      "ctl %08x head %08x tail %08x start %08x\n",
461
			      I915_READ_HEAD(ring),
511
			      ring->name,
-
 
512
			      I915_READ_CTL(ring),
-
 
513
			      I915_READ_HEAD(ring),
462
			      I915_READ_TAIL(ring),
514
			      I915_READ_TAIL(ring),
463
			      I915_READ_START(ring));
515
			      I915_READ_START(ring));
Line -... Line 516...
-
 
516
 
-
 
517
		if (!stop_ring(ring)) {
-
 
518
			DRM_ERROR("failed to set %s head to zero "
-
 
519
				  "ctl %08x head %08x tail %08x start %08x\n",
-
 
520
				  ring->name,
-
 
521
				  I915_READ_CTL(ring),
-
 
522
				  I915_READ_HEAD(ring),
-
 
523
				  I915_READ_TAIL(ring),
464
 
524
				  I915_READ_START(ring));
465
		I915_WRITE_HEAD(ring, 0);
525
			ret = -EIO;
466
 
526
			goto out;
467
		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
527
		}
468
			DRM_ERROR("failed to set %s head to zero "
528
	}
469
				  "ctl %08x head %08x tail %08x start %08x\n",
529
 
470
				  ring->name,
530
	if (I915_NEED_GFX_HWS(dev))
471
				  I915_READ_CTL(ring),
531
		intel_ring_setup_status_page(ring);
Line 472... Line 532...
472
				  I915_READ_HEAD(ring),
532
	else
473
				  I915_READ_TAIL(ring),
533
		ring_setup_phys_status_page(ring);
474
				  I915_READ_START(ring));
534
 
475
		}
535
	/* Enforce ordering by reading HEAD register back */
476
	}
536
	I915_READ_HEAD(ring);
477
 
537
 
478
	/* Initialize the ring. This must happen _after_ we've cleared the ring
538
	/* Initialize the ring. This must happen _after_ we've cleared the ring
479
	 * registers with the above sequence (the readback of the HEAD registers
-
 
480
	 * also enforces ordering), otherwise the hw might lose the new ring
539
	 * registers with the above sequence (the readback of the HEAD registers
481
	 * register values. */
540
	 * also enforces ordering), otherwise the hw might lose the new ring
482
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
541
	 * register values. */
483
	I915_WRITE_CTL(ring,
542
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
484
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
543
	I915_WRITE_CTL(ring,
485
			| RING_VALID);
544
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
Line -... Line 545...
-
 
545
			| RING_VALID);
486
 
546
 
487
	/* If the head is still not zero, the ring is dead */
547
	/* If the head is still not zero, the ring is dead */
488
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
548
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
489
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
549
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
Line 490... Line 550...
490
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
550
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
Line 491... Line 551...
491
		DRM_ERROR("%s initialization failed "
551
		DRM_ERROR("%s initialization failed "
492
				"ctl %08x head %08x tail %08x start %08x\n",
552
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
Line 493... Line 553...
493
				ring->name,
553
				ring->name,
494
				I915_READ_CTL(ring),
554
			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
Line 495... Line 555...
495
				I915_READ_HEAD(ring),
555
			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
496
				I915_READ_TAIL(ring),
556
			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
497
				I915_READ_START(ring));
557
		ret = -EIO;
498
		ret = -EIO;
558
		goto out;
Line 499... Line 559...
499
		goto out;
559
	}
500
	}
560
 
Line 525... Line 585...
525
		DRM_ERROR("Failed to allocate seqno page\n");
585
		DRM_ERROR("Failed to allocate seqno page\n");
526
		ret = -ENOMEM;
586
		ret = -ENOMEM;
527
		goto err;
587
		goto err;
528
	}
588
	}
Line 529... Line 589...
529
 
589
 
-
 
590
	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
-
 
591
	if (ret)
Line 530... Line 592...
530
	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
592
		goto err_unref;
531
 
593
 
532
	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
594
	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
Line 533... Line 595...
533
	if (ret)
595
	if (ret)
534
		goto err_unref;
596
		goto err_unref;
Line 543... Line 605...
543
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
605
	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
544
			 ring->name, ring->scratch.gtt_offset);
606
			 ring->name, ring->scratch.gtt_offset);
545
	return 0;
607
	return 0;
Line 546... Line 608...
546
 
608
 
547
err_unpin:
609
err_unpin:
548
	i915_gem_object_unpin(ring->scratch.obj);
610
	i915_gem_object_ggtt_unpin(ring->scratch.obj);
549
err_unref:
611
err_unref:
550
	drm_gem_object_unreference(&ring->scratch.obj->base);
612
	drm_gem_object_unreference(&ring->scratch.obj->base);
551
err:
613
err:
552
	return ret;
614
	return ret;
Line 553... Line 615...
553
}
615
}
554
 
616
 
555
static int init_render_ring(struct intel_ring_buffer *ring)
617
static int init_render_ring(struct intel_engine_cs *ring)
556
{
618
{
557
	struct drm_device *dev = ring->dev;
619
	struct drm_device *dev = ring->dev;
-
 
620
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
621
	int ret = init_ring_common(ring);
Line -... Line 622...
-
 
622
	if (ret)
558
	struct drm_i915_private *dev_priv = dev->dev_private;
623
		return ret;
559
	int ret = init_ring_common(ring);
624
 
Line 560... Line 625...
560
 
625
	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
561
	if (INTEL_INFO(dev)->gen > 3)
626
	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
562
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
627
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
563
 
628
 
564
	/* We need to disable the AsyncFlip performance optimisations in order
629
	/* We need to disable the AsyncFlip performance optimisations in order
565
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
630
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
566
	 * programmed to '1' on all products.
631
	 * programmed to '1' on all products.
567
	 *
632
	 *
Line 568... Line 633...
568
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
633
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
-
 
634
	 */
569
	 */
635
	if (INTEL_INFO(dev)->gen >= 6)
570
	if (INTEL_INFO(dev)->gen >= 6)
636
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
571
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
637
 
Line -... Line 638...
-
 
638
	/* Required for the hardware to program scanline values for waiting */
572
 
639
	/* WaEnableFlushTlbInvalidationMode:snb */
573
	/* Required for the hardware to program scanline values for waiting */
640
	if (INTEL_INFO(dev)->gen == 6)
574
	if (INTEL_INFO(dev)->gen == 6)
641
		I915_WRITE(GFX_MODE,
575
		I915_WRITE(GFX_MODE,
642
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
Line 576... Line 643...
576
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
643
 
577
 
644
	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
578
		if (IS_GEN7(dev))
645
		if (IS_GEN7(dev))
Line 592... Line 659...
592
		 *  policy. [...] This bit must be reset.  LRA replacement
659
		 *  policy. [...] This bit must be reset.  LRA replacement
593
		 *  policy is not supported."
660
		 *  policy is not supported."
594
		 */
661
		 */
595
		I915_WRITE(CACHE_MODE_0,
662
		I915_WRITE(CACHE_MODE_0,
596
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
663
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
597
 
-
 
598
		/* This is not explicitly set for GEN6, so read the register.
-
 
599
		 * see intel_ring_mi_set_context() for why we care.
-
 
600
		 * TODO: consider explicitly setting the bit for GEN5
-
 
601
		 */
-
 
602
		ring->itlb_before_ctx_switch =
-
 
603
			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
-
 
604
	}
664
	}
Line 605... Line 665...
605
 
665
 
606
	if (INTEL_INFO(dev)->gen >= 6)
666
	if (INTEL_INFO(dev)->gen >= 6)
Line 610... Line 670...
610
		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
670
		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
Line 611... Line 671...
611
 
671
 
612
	return ret;
672
	return ret;
Line 613... Line 673...
613
}
673
}
614
 
674
 
615
static void render_ring_cleanup(struct intel_ring_buffer *ring)
675
static void render_ring_cleanup(struct intel_engine_cs *ring)
Line 616... Line 676...
616
{
676
{
617
	struct drm_device *dev = ring->dev;
677
	struct drm_device *dev = ring->dev;
Line 618... Line 678...
618
 
678
 
619
	if (ring->scratch.obj == NULL)
679
	if (ring->scratch.obj == NULL)
620
		return;
680
		return;
621
 
681
 
Line 622... Line 682...
622
	if (INTEL_INFO(dev)->gen >= 5) {
682
	if (INTEL_INFO(dev)->gen >= 5) {
623
//       kunmap(sg_page(ring->scratch.obj->pages->sgl));
683
//		kunmap(sg_page(ring->scratch.obj->pages->sgl));
624
		i915_gem_object_unpin(ring->scratch.obj);
684
		i915_gem_object_ggtt_unpin(ring->scratch.obj);
Line 625... Line -...
625
	}
-
 
626
 
685
	}
627
	drm_gem_object_unreference(&ring->scratch.obj->base);
686
 
628
	ring->scratch.obj = NULL;
687
	drm_gem_object_unreference(&ring->scratch.obj->base);
-
 
688
	ring->scratch.obj = NULL;
-
 
689
}
-
 
690
 
-
 
691
static int gen8_rcs_signal(struct intel_engine_cs *signaller,
-
 
692
			   unsigned int num_dwords)
-
 
693
{
-
 
694
#define MBOX_UPDATE_DWORDS 8
-
 
695
	struct drm_device *dev = signaller->dev;
-
 
696
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
697
	struct intel_engine_cs *waiter;
-
 
698
	int i, ret, num_rings;
-
 
699
 
-
 
700
	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
-
 
701
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-
 
702
#undef MBOX_UPDATE_DWORDS
629
}
703
 
-
 
704
	ret = intel_ring_begin(signaller, num_dwords);
-
 
705
	if (ret)
-
 
706
		return ret;
-
 
707
 
-
 
708
	for_each_ring(waiter, dev_priv, i) {
-
 
709
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
-
 
710
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
-
 
711
			continue;
-
 
712
 
630
 
713
		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
-
 
714
		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
-
 
715
					   PIPE_CONTROL_QW_WRITE |
-
 
716
					   PIPE_CONTROL_FLUSH_ENABLE);
-
 
717
		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
-
 
718
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-
 
719
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
-
 
720
		intel_ring_emit(signaller, 0);
-
 
721
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-
 
722
					   MI_SEMAPHORE_TARGET(waiter->id));
631
static void
723
		intel_ring_emit(signaller, 0);
-
 
724
	}
-
 
725
 
-
 
726
	return 0;
-
 
727
}
-
 
728
 
-
 
729
static int gen8_xcs_signal(struct intel_engine_cs *signaller,
-
 
730
			   unsigned int num_dwords)
-
 
731
{
-
 
732
#define MBOX_UPDATE_DWORDS 6
-
 
733
	struct drm_device *dev = signaller->dev;
-
 
734
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
735
	struct intel_engine_cs *waiter;
-
 
736
	int i, ret, num_rings;
-
 
737
 
-
 
738
	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
-
 
739
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-
 
740
#undef MBOX_UPDATE_DWORDS
632
update_mboxes(struct intel_ring_buffer *ring,
741
 
-
 
742
	ret = intel_ring_begin(signaller, num_dwords);
-
 
743
	if (ret)
-
 
744
		return ret;
-
 
745
 
-
 
746
	for_each_ring(waiter, dev_priv, i) {
-
 
747
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
-
 
748
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
-
 
749
			continue;
-
 
750
 
-
 
751
		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
-
 
752
					   MI_FLUSH_DW_OP_STOREDW);
-
 
753
		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
633
	    u32 mmio_offset)
754
					   MI_FLUSH_DW_USE_GTT);
-
 
755
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-
 
756
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
-
 
757
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-
 
758
					   MI_SEMAPHORE_TARGET(waiter->id));
-
 
759
		intel_ring_emit(signaller, 0);
-
 
760
	}
-
 
761
 
-
 
762
	return 0;
-
 
763
}
-
 
764
 
-
 
765
static int gen6_signal(struct intel_engine_cs *signaller,
-
 
766
		       unsigned int num_dwords)
634
{
767
{
-
 
768
	struct drm_device *dev = signaller->dev;
-
 
769
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
770
	struct intel_engine_cs *useless;
-
 
771
	int i, ret, num_rings;
-
 
772
 
-
 
773
#define MBOX_UPDATE_DWORDS 3
-
 
774
	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
-
 
775
	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
-
 
776
#undef MBOX_UPDATE_DWORDS
-
 
777
 
-
 
778
	ret = intel_ring_begin(signaller, num_dwords);
635
/* NB: In order to be able to do semaphore MBOX updates for varying number
779
	if (ret)
636
 * of rings, it's easiest if we round up each individual update to a
780
		return ret;
637
 * multiple of 2 (since ring updates must always be a multiple of 2)
781
 
-
 
782
	for_each_ring(useless, dev_priv, i) {
-
 
783
		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
-
 
784
		if (mbox_reg != GEN6_NOSYNC) {
-
 
785
			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
-
 
786
			intel_ring_emit(signaller, mbox_reg);
638
 * even though the actual update only requires 3 dwords.
787
			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
-
 
788
		}
-
 
789
	}
639
 */
790
 
Line 640... Line 791...
640
#define MBOX_UPDATE_DWORDS 4
791
	/* If num_dwords was rounded, make sure the tail pointer is correct */
641
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
792
	if (num_rings % 2 == 0)
642
	intel_ring_emit(ring, mmio_offset);
793
		intel_ring_emit(signaller, MI_NOOP);
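A small standalone sketch (names invented) of the dword accounting described by the comment above: each mailbox update takes 3 dwords, but the ring tail only advances in 2-dword steps, so the per-request total is rounded up and a trailing MI_NOOP fills the hole when the ring count is even:

/* Illustrative only: dword budget for a gen6-style signal with N rings. */
static unsigned int example_signal_dwords(unsigned int num_rings,
					  unsigned int request_dwords)
{
	unsigned int mbox_dwords = (num_rings - 1) * 3;

	mbox_dwords = (mbox_dwords + 1) & ~1u;	/* round_up(x, 2) */
	return request_dwords + mbox_dwords;
	/* e.g. 4 rings: 3 * 3 = 9 -> 10 dwords; the odd slot is an MI_NOOP */
}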
Line 652... Line 803...
652
 *
803
 *
653
 * Update the mailbox registers in the *other* rings with the current seqno.
804
 * Update the mailbox registers in the *other* rings with the current seqno.
654
 * This acts like a signal in the canonical semaphore.
805
 * This acts like a signal in the canonical semaphore.
655
 */
806
 */
656
static int
807
static int
657
gen6_add_request(struct intel_ring_buffer *ring)
808
gen6_add_request(struct intel_engine_cs *ring)
658
{
809
{
659
	struct drm_device *dev = ring->dev;
-
 
660
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
661
	struct intel_ring_buffer *useless;
-
 
662
	int i, ret, num_dwords = 4;
810
	int ret;
Line 663... Line 811...
663
 
811
 
664
	if (i915_semaphore_is_enabled(dev))
812
	if (ring->semaphore.signal)
-
 
813
	ret = ring->semaphore.signal(ring, 4);
665
		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
814
	else
Line 666... Line -...
666
#undef MBOX_UPDATE_DWORDS
-
 
667
 
815
		ret = intel_ring_begin(ring, 4);
668
	ret = intel_ring_begin(ring, num_dwords);
816
 
Line 669... Line -...
669
	if (ret)
-
 
670
		return ret;
-
 
671
 
-
 
672
	if (i915_semaphore_is_enabled(dev)) {
-
 
673
	for_each_ring(useless, dev_priv, i) {
-
 
674
		u32 mbox_reg = ring->signal_mbox[i];
-
 
675
		if (mbox_reg != GEN6_NOSYNC)
-
 
676
			update_mboxes(ring, mbox_reg);
-
 
677
	}
817
	if (ret)
678
	}
818
		return ret;
679
 
819
 
680
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
820
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
681
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
821
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
Line 698... Line 838...
698
 *
838
 *
699
 * @waiter - ring that is waiting
839
 * @waiter - ring that is waiting
700
 * @signaller - ring which has, or will signal
840
 * @signaller - ring which has, or will signal
701
 * @seqno - seqno which the waiter will block on
841
 * @seqno - seqno which the waiter will block on
702
 */
842
 */
-
 
843
 
703
static int
844
static int
704
gen6_ring_sync(struct intel_ring_buffer *waiter,
845
gen8_ring_sync(struct intel_engine_cs *waiter,
705
		struct intel_ring_buffer *signaller,
846
	       struct intel_engine_cs *signaller,
706
		u32 seqno)
847
	       u32 seqno)
707
{
848
{
-
 
849
	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
708
	int ret;
850
	int ret;
-
 
851
 
-
 
852
	ret = intel_ring_begin(waiter, 4);
-
 
853
	if (ret)
-
 
854
		return ret;
-
 
855
 
-
 
856
	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
-
 
857
				MI_SEMAPHORE_GLOBAL_GTT |
-
 
858
				MI_SEMAPHORE_POLL |
-
 
859
				MI_SEMAPHORE_SAD_GTE_SDD);
-
 
860
	intel_ring_emit(waiter, seqno);
-
 
861
	intel_ring_emit(waiter,
-
 
862
			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
-
 
863
	intel_ring_emit(waiter,
-
 
864
			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
-
 
865
	intel_ring_advance(waiter);
-
 
866
	return 0;
-
 
867
}
-
 
868
 
-
 
869
static int
-
 
870
gen6_ring_sync(struct intel_engine_cs *waiter,
-
 
871
	       struct intel_engine_cs *signaller,
-
 
872
		u32 seqno)
-
 
873
{
709
	u32 dw1 = MI_SEMAPHORE_MBOX |
874
	u32 dw1 = MI_SEMAPHORE_MBOX |
710
		  MI_SEMAPHORE_COMPARE |
875
		  MI_SEMAPHORE_COMPARE |
711
		  MI_SEMAPHORE_REGISTER;
876
		  MI_SEMAPHORE_REGISTER;
-
 
877
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
-
 
878
	int ret;
Line 712... Line 879...
712
 
879
 
713
	/* Throughout all of the GEM code, seqno passed implies our current
880
	/* Throughout all of the GEM code, seqno passed implies our current
714
	 * seqno is >= the last seqno executed. However for hardware the
881
	 * seqno is >= the last seqno executed. However for hardware the
715
	 * comparison is strictly greater than.
882
	 * comparison is strictly greater than.
716
	 */
883
	 */
Line 717... Line -...
717
	seqno -= 1;
-
 
718
 
884
	seqno -= 1;
Line 719... Line 885...
719
	WARN_ON(signaller->semaphore_register[waiter->id] ==
885
 
720
		MI_SEMAPHORE_SYNC_INVALID);
886
	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
721
 
887
 
Line 722... Line 888...
722
	ret = intel_ring_begin(waiter, 4);
888
	ret = intel_ring_begin(waiter, 4);
723
	if (ret)
889
	if (ret)
724
		return ret;
890
		return ret;
725
 
-
 
726
	/* If seqno wrap happened, omit the wait with no-ops */
-
 
727
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
891
 
728
	intel_ring_emit(waiter,
892
	/* If seqno wrap happened, omit the wait with no-ops */
729
				dw1 |
893
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
730
				signaller->semaphore_register[waiter->id]);
894
		intel_ring_emit(waiter, dw1 | wait_mbox);
731
	intel_ring_emit(waiter, seqno);
895
	intel_ring_emit(waiter, seqno);
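A one-line sketch (illustrative only, ignoring the wrap case the driver special-cases above) of why the wait value is programmed as seqno - 1:

/* The gen6 mailbox wait passes when the signalled value is strictly greater
 * than the programmed value, so waiting on seqno - 1 behaves as ">= seqno". */
static int example_semaphore_passes(unsigned int signalled, unsigned int seqno)
{
	return signalled > (seqno - 1);		/* equivalent to signalled >= seqno */
}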
Line 750... Line 914...
750
	intel_ring_emit(ring__, 0);							\
914
	intel_ring_emit(ring__, 0);							\
751
	intel_ring_emit(ring__, 0);							\
915
	intel_ring_emit(ring__, 0);							\
752
} while (0)
916
} while (0)
Line 753... Line 917...
753
 
917
 
754
static int
918
static int
755
pc_render_add_request(struct intel_ring_buffer *ring)
919
pc_render_add_request(struct intel_engine_cs *ring)
756
{
920
{
757
	u32 scratch_addr = ring->scratch.gtt_offset + 128;
921
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
Line 758... Line 922...
758
	int ret;
922
	int ret;
759
 
923
 
760
	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
924
	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
Line 774... Line 938...
774
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
938
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
775
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
939
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
776
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
940
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
777
	intel_ring_emit(ring, 0);
941
	intel_ring_emit(ring, 0);
778
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
942
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
779
	scratch_addr += 128; /* write to separate cachelines */
943
	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
780
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
944
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
781
	scratch_addr += 128;
945
	scratch_addr += 2 * CACHELINE_BYTES;
782
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
946
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
783
	scratch_addr += 128;
947
	scratch_addr += 2 * CACHELINE_BYTES;
784
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
948
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
785
	scratch_addr += 128;
949
	scratch_addr += 2 * CACHELINE_BYTES;
786
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
950
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
787
	scratch_addr += 128;
951
	scratch_addr += 2 * CACHELINE_BYTES;
788
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
952
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
Line 789... Line 953...
789
 
953
 
790
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
954
	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
791
			PIPE_CONTROL_WRITE_FLUSH |
955
			PIPE_CONTROL_WRITE_FLUSH |
Line 798... Line 962...
798
 
962
 
799
	return 0;
963
	return 0;
Line 800... Line 964...
800
}
964
}
801
 
965
 
802
static u32
966
static u32
803
gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
967
gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
804
{
968
{
805
	/* Workaround to force correct ordering between irq and seqno writes on
969
	/* Workaround to force correct ordering between irq and seqno writes on
806
	 * ivb (and maybe also on snb) by reading from a CS register (like
970
	 * ivb (and maybe also on snb) by reading from a CS register (like
-
 
971
	 * ACTHD) before reading the status page. */
807
	 * ACTHD) before reading the status page. */
972
	if (!lazy_coherency) {
-
 
973
		struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
 
974
		POSTING_READ(RING_ACTHD(ring->mmio_base));
808
	if (!lazy_coherency)
975
	}
809
		intel_ring_get_active_head(ring);
976
 
Line 810... Line 977...
810
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
977
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
811
}
978
}
812
 
979
 
813
static u32
980
static u32
814
ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
981
ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
Line 815... Line 982...
815
{
982
{
816
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
983
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
817
}
984
}
818
 
985
 
819
static void
986
static void
Line 820... Line 987...
820
ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
987
ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
821
{
988
{
822
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
989
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
823
}
990
}
824
 
991
 
Line 825... Line 992...
825
static u32
992
static u32
826
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
993
pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
827
{
994
{
828
	return ring->scratch.cpu_page[0];
995
	return ring->scratch.cpu_page[0];
829
}
996
}
Line 830... Line 997...
830
 
997
 
831
static void
998
static void
832
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
999
pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
833
{
1000
{
834
	ring->scratch.cpu_page[0] = seqno;
1001
	ring->scratch.cpu_page[0] = seqno;
835
}
1002
}
Line 836... Line 1003...
836
 
1003
 
837
static bool
1004
static bool
Line 838... Line 1005...
838
gen5_ring_get_irq(struct intel_ring_buffer *ring)
1005
gen5_ring_get_irq(struct intel_engine_cs *ring)
839
{
1006
{
840
	struct drm_device *dev = ring->dev;
1007
	struct drm_device *dev = ring->dev;
841
	drm_i915_private_t *dev_priv = dev->dev_private;
1008
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 842... Line 1009...
842
	unsigned long flags;
1009
	unsigned long flags;
843
 
1010
 
Line 844... Line 1011...
844
	if (!dev->irq_enabled)
1011
	if (!dev->irq_enabled)
845
		return false;
1012
		return false;
846
 
1013
 
847
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
1014
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
848
	if (ring->irq_refcount++ == 0)
1015
	if (ring->irq_refcount++ == 0)
849
		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1016
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
Line 850... Line 1017...
850
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1017
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
851
 
1018
 
852
	return true;
1019
	return true;
853
}
1020
}
854
 
1021
 
Line 855... Line 1022...
855
static void
1022
static void
856
gen5_ring_put_irq(struct intel_ring_buffer *ring)
1023
gen5_ring_put_irq(struct intel_engine_cs *ring)
857
{
1024
{
858
	struct drm_device *dev = ring->dev;
1025
	struct drm_device *dev = ring->dev;
859
	drm_i915_private_t *dev_priv = dev->dev_private;
1026
	struct drm_i915_private *dev_priv = dev->dev_private;
860
	unsigned long flags;
1027
	unsigned long flags;
Line 861... Line 1028...
861
 
1028
 
862
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
1029
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
Line 885... Line 1052...
885
 
1052
 
886
	return true;
1053
	return true;
Line 887... Line 1054...
887
}
1054
}
888
 
1055
 
889
static void
1056
static void
890
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
1057
i9xx_ring_put_irq(struct intel_engine_cs *ring)
891
{
1058
{
892
	struct drm_device *dev = ring->dev;
1059
	struct drm_device *dev = ring->dev;
Line 893... Line 1060...
893
	drm_i915_private_t *dev_priv = dev->dev_private;
1060
	struct drm_i915_private *dev_priv = dev->dev_private;
894
	unsigned long flags;
1061
	unsigned long flags;
895
 
1062
 
Line 901... Line 1068...
901
	}
1068
	}
902
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1069
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
903
}
1070
}
Line 904... Line 1071...
904
 
1071
 
905
static bool
1072
static bool
906
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
1073
i8xx_ring_get_irq(struct intel_engine_cs *ring)
907
{
1074
{
908
	struct drm_device *dev = ring->dev;
1075
	struct drm_device *dev = ring->dev;
909
	drm_i915_private_t *dev_priv = dev->dev_private;
1076
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 910... Line 1077...
910
	unsigned long flags;
1077
	unsigned long flags;
911
 
1078
 
Line 922... Line 1089...
922
 
1089
 
923
	return true;
1090
	return true;
Line 924... Line 1091...
924
}
1091
}
925
 
1092
 
926
static void
1093
static void
927
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
1094
i8xx_ring_put_irq(struct intel_engine_cs *ring)
928
{
1095
{
929
	struct drm_device *dev = ring->dev;
1096
	struct drm_device *dev = ring->dev;
Line 930... Line 1097...
930
	drm_i915_private_t *dev_priv = dev->dev_private;
1097
	struct drm_i915_private *dev_priv = dev->dev_private;
931
	unsigned long flags;
1098
	unsigned long flags;
932
 
1099
 
Line 937... Line 1104...
937
		POSTING_READ16(IMR);
1104
		POSTING_READ16(IMR);
938
	}
1105
	}
939
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1106
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
940
}
1107
}
Line 941... Line 1108...
941
 
1108
 
942
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
1109
void intel_ring_setup_status_page(struct intel_engine_cs *ring)
943
{
1110
{
944
	struct drm_device *dev = ring->dev;
1111
	struct drm_device *dev = ring->dev;
945
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
1112
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
Line 946... Line 1113...
946
	u32 mmio = 0;
1113
	u32 mmio = 0;
947
 
1114
 
948
	/* The ring status page addresses are no longer next to the rest of
1115
	/* The ring status page addresses are no longer next to the rest of
Line 954... Line 1121...
954
			mmio = RENDER_HWS_PGA_GEN7;
1121
			mmio = RENDER_HWS_PGA_GEN7;
955
			break;
1122
			break;
956
		case BCS:
1123
		case BCS:
957
			mmio = BLT_HWS_PGA_GEN7;
1124
			mmio = BLT_HWS_PGA_GEN7;
958
			break;
1125
			break;
-
 
1126
		/*
-
 
1127
		 * VCS2 actually doesn't exist on Gen7. Only shut up
-
 
1128
		 * gcc switch check warning
-
 
1129
		 */
-
 
1130
		case VCS2:
959
		case VCS:
1131
		case VCS:
960
			mmio = BSD_HWS_PGA_GEN7;
1132
			mmio = BSD_HWS_PGA_GEN7;
961
			break;
1133
			break;
962
		case VECS:
1134
		case VECS:
963
			mmio = VEBOX_HWS_PGA_GEN7;
1135
			mmio = VEBOX_HWS_PGA_GEN7;
Line 971... Line 1143...
971
	}
1143
	}
Line 972... Line 1144...
972
 
1144
 
973
	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
1145
	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
Line -... Line 1146...
-
 
1146
	POSTING_READ(mmio);
974
	POSTING_READ(mmio);
1147
 
-
 
1148
	/*
-
 
1149
	 * Flush the TLB for this page
-
 
1150
	 *
-
 
1151
	 * FIXME: These two bits have disappeared on gen8, so a question
-
 
1152
	 * arises: do we still need this and if so how should we go about
975
 
1153
	 * invalidating the TLB?
976
	/* Flush the TLB for this page */
1154
	 */
-
 
1155
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
-
 
1156
		u32 reg = RING_INSTPM(ring->mmio_base);
-
 
1157
 
-
 
1158
		/* ring should be idle before issuing a sync flush*/
977
	if (INTEL_INFO(dev)->gen >= 6) {
1159
		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
978
		u32 reg = RING_INSTPM(ring->mmio_base);
1160
 
979
		I915_WRITE(reg,
1161
		I915_WRITE(reg,
980
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
1162
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
981
					      INSTPM_SYNC_FLUSH));
1163
					      INSTPM_SYNC_FLUSH));
Line 985... Line 1167...
985
				  ring->name);
1167
				  ring->name);
986
	}
1168
	}
987
}
1169
}
Line 988... Line 1170...
988
 
1170
 
989
static int
1171
static int
990
bsd_ring_flush(struct intel_ring_buffer *ring,
1172
bsd_ring_flush(struct intel_engine_cs *ring,
991
	       u32     invalidate_domains,
1173
	       u32     invalidate_domains,
992
	       u32     flush_domains)
1174
	       u32     flush_domains)
993
{
1175
{
Line 1002... Line 1184...
1002
	intel_ring_advance(ring);
1184
	intel_ring_advance(ring);
1003
	return 0;
1185
	return 0;
1004
}
1186
}
Line 1005... Line 1187...
1005
 
1187
 
1006
static int
1188
static int
1007
i9xx_add_request(struct intel_ring_buffer *ring)
1189
i9xx_add_request(struct intel_engine_cs *ring)
1008
{
1190
{
Line 1009... Line 1191...
1009
	int ret;
1191
	int ret;
1010
 
1192
 
Line 1020... Line 1202...
1020
 
1202
 
1021
	return 0;
1203
	return 0;
Line 1022... Line 1204...
1022
}
1204
}
1023
 
1205
 
1024
static bool
1206
static bool
1025
gen6_ring_get_irq(struct intel_ring_buffer *ring)
1207
gen6_ring_get_irq(struct intel_engine_cs *ring)
1026
{
1208
{
1027
	struct drm_device *dev = ring->dev;
1209
	struct drm_device *dev = ring->dev;
Line 1028... Line 1210...
1028
	drm_i915_private_t *dev_priv = dev->dev_private;
1210
	struct drm_i915_private *dev_priv = dev->dev_private;
1029
	unsigned long flags;
1211
	unsigned long flags;
Line 1037... Line 1219...
1037
			I915_WRITE_IMR(ring,
1219
			I915_WRITE_IMR(ring,
1038
				       ~(ring->irq_enable_mask |
1220
				       ~(ring->irq_enable_mask |
1039
					 GT_PARITY_ERROR(dev)));
1221
					 GT_PARITY_ERROR(dev)));
1040
		else
1222
		else
1041
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1223
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1042
		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1224
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
1043
	}
1225
	}
1044
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1226
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
Line 1045... Line 1227...
1045
 
1227
 
1046
    return true;
1228
    return true;
Line 1047... Line 1229...
1047
}
1229
}
1048
 
1230
 
1049
static void
1231
static void
1050
gen6_ring_put_irq(struct intel_ring_buffer *ring)
1232
gen6_ring_put_irq(struct intel_engine_cs *ring)
1051
{
1233
{
1052
	struct drm_device *dev = ring->dev;
1234
	struct drm_device *dev = ring->dev;
Line 1053... Line 1235...
1053
	drm_i915_private_t *dev_priv = dev->dev_private;
1235
	struct drm_i915_private *dev_priv = dev->dev_private;
1054
	unsigned long flags;
1236
	unsigned long flags;
1055
 
1237
 
1056
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
1238
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
1057
	if (--ring->irq_refcount == 0) {
1239
	if (--ring->irq_refcount == 0) {
1058
		if (HAS_L3_DPF(dev) && ring->id == RCS)
1240
		if (HAS_L3_DPF(dev) && ring->id == RCS)
1059
			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1241
			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
1060
		else
1242
		else
1061
			I915_WRITE_IMR(ring, ~0);
1243
			I915_WRITE_IMR(ring, ~0);
1062
		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
1244
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
Line 1063... Line 1245...
1063
	}
1245
	}
1064
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1246
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1065
}
1247
}
1066
 
1248
 
1067
static bool
1249
static bool
1068
hsw_vebox_get_irq(struct intel_ring_buffer *ring)
1250
hsw_vebox_get_irq(struct intel_engine_cs *ring)
Line 1075... Line 1257...
1075
		return false;
1257
		return false;
Line 1076... Line 1258...
1076
 
1258
 
1077
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
1259
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
1078
	if (ring->irq_refcount++ == 0) {
1260
	if (ring->irq_refcount++ == 0) {
1079
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1261
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
1080
		snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1262
		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
1081
	}
1263
	}
Line 1082... Line 1264...
1082
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1264
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1083
 
1265
 
Line 1084... Line 1266...
1084
	return true;
1266
	return true;
1085
}
1267
}
1086
 
1268
 
1087
static void
1269
static void
1088
hsw_vebox_put_irq(struct intel_ring_buffer *ring)
1270
hsw_vebox_put_irq(struct intel_engine_cs *ring)
1089
{
1271
{
Line 1095... Line 1277...
1095
		return;
1277
		return;
Line 1096... Line 1278...
1096
 
1278
 
1097
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
1279
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
1098
	if (--ring->irq_refcount == 0) {
1280
	if (--ring->irq_refcount == 0) {
1099
		I915_WRITE_IMR(ring, ~0);
1281
		I915_WRITE_IMR(ring, ~0);
1100
		snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1282
		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
1101
	}
1283
	}
1102
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1284
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
Line 1103... Line 1285...
1103
}
1285
}
1104
 
1286
 
1105
static bool
1287
static bool
1106
gen8_ring_get_irq(struct intel_ring_buffer *ring)
1288
gen8_ring_get_irq(struct intel_engine_cs *ring)
1107
{
1289
{
1108
	struct drm_device *dev = ring->dev;
1290
	struct drm_device *dev = ring->dev;
Line 1127... Line 1309...
1127
 
1309
 
1128
	return true;
1310
	return true;
Line 1129... Line 1311...
1129
}
1311
}
1130
 
1312
 
1131
static void
1313
static void
1132
gen8_ring_put_irq(struct intel_ring_buffer *ring)
1314
gen8_ring_put_irq(struct intel_engine_cs *ring)
1133
{
1315
{
1134
	struct drm_device *dev = ring->dev;
1316
	struct drm_device *dev = ring->dev;
Line 1147... Line 1329...
1147
	}
1329
	}
1148
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1330
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1149
}
1331
}
Line 1150... Line 1332...
1150
 
1332
 
1151
static int
1333
static int
1152
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
1334
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
1153
			 u32 offset, u32 length,
1335
			 u64 offset, u32 length,
1154
			 unsigned flags)
1336
			 unsigned flags)
1155
{
1337
{
Line 1156... Line 1338...
1156
	int ret;
1338
	int ret;
Line 1170... Line 1352...
1170
}
1352
}
Line 1171... Line 1353...
1171
 
1353
 
1172
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1354
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1173
#define I830_BATCH_LIMIT (256*1024)
1355
#define I830_BATCH_LIMIT (256*1024)
1174
static int
1356
static int
1175
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
1357
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
1176
				u32 offset, u32 len,
1358
				u64 offset, u32 len,
1177
				unsigned flags)
1359
				unsigned flags)
1178
{
1360
{
Line 1179... Line 1361...
1179
	int ret;
1361
	int ret;
Line 1221... Line 1403...
1221
 
1403
 
1222
	return 0;
1404
	return 0;
Line 1223... Line 1405...
1223
}
1405
}
1224
 
1406
 
1225
static int
1407
static int
1226
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1408
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
1227
			 u32 offset, u32 len,
1409
			 u64 offset, u32 len,
1228
			 unsigned flags)
1410
			 unsigned flags)
Line 1229... Line 1411...
1229
{
1411
{
Line 1238... Line 1420...
1238
	intel_ring_advance(ring);
1420
	intel_ring_advance(ring);
Line 1239... Line 1421...
1239
 
1421
 
1240
	return 0;
1422
	return 0;
Line 1241... Line 1423...
1241
}
1423
}
1242
 
1424
 
1243
static void cleanup_status_page(struct intel_ring_buffer *ring)
1425
static void cleanup_status_page(struct intel_engine_cs *ring)
Line 1244... Line 1426...
1244
{
1426
{
1245
	struct drm_i915_gem_object *obj;
1427
	struct drm_i915_gem_object *obj;
1246
 
1428
 
Line 1247... Line 1429...
1247
	obj = ring->status_page.obj;
1429
	obj = ring->status_page.obj;
1248
	if (obj == NULL)
1430
	if (obj == NULL)
1249
		return;
1431
		return;
1250
 
1432
 
1251
//   kunmap(sg_page(obj->pages->sgl));
1433
//	kunmap(sg_page(obj->pages->sgl));
Line 1252... Line 1434...
1252
    i915_gem_object_unpin(obj);
1434
	i915_gem_object_ggtt_unpin(obj);
1253
	drm_gem_object_unreference(&obj->base);
1435
	drm_gem_object_unreference(&obj->base);
1254
	ring->status_page.obj = NULL;
-
 
1255
}
1436
	ring->status_page.obj = NULL;
-
 
1437
}
-
 
1438
 
-
 
1439
static int init_status_page(struct intel_engine_cs *ring)
1256
 
1440
{
Line 1257... Line 1441...
1257
static int init_status_page(struct intel_ring_buffer *ring)
1441
	struct drm_i915_gem_object *obj;
1258
{
1442
 
1259
	struct drm_device *dev = ring->dev;
1443
	if ((obj = ring->status_page.obj) == NULL) {
1260
	struct drm_i915_gem_object *obj;
1444
		unsigned flags;
1261
	int ret;
-
 
1262
 
1445
	int ret;
Line 1263... Line 1446...
1263
	obj = i915_gem_alloc_object(dev, 4096);
1446
 
1264
	if (obj == NULL) {
-
 
1265
		DRM_ERROR("Failed to allocate status page\n");
-
 
1266
		ret = -ENOMEM;
1447
		obj = i915_gem_alloc_object(ring->dev, 4096);
1267
		goto err;
1448
	if (obj == NULL) {
-
 
1449
		DRM_ERROR("Failed to allocate status page\n");
-
 
1450
			return -ENOMEM;
-
 
1451
	}
-
 
1452
 
-
 
1453
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-
 
1454
	if (ret)
-
 
1455
		goto err_unref;
-
 
1456
 
-
 
1457
		flags = 0;
-
 
1458
		if (!HAS_LLC(ring->dev))
-
 
1459
			/* On g33, we cannot place HWS above 256MiB, so
-
 
1460
			 * restrict its pinning to the low mappable arena.
-
 
1461
			 * Though this restriction is not documented for
-
 
1462
			 * gen4, gen5, or byt, they also behave similarly
-
 
1463
			 * and hang if the HWS is placed at the top of the
-
 
1464
			 * GTT. To generalise, it appears that all !llc
-
 
1465
			 * platforms have issues with us placing the HWS
-
 
1466
			 * above the mappable region (even though we never
-
 
1467
			 * actually map it).
-
 
1468
			 */
-
 
1469
			flags |= PIN_MAPPABLE;
-
 
1470
		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
1268
	}
1471
		if (ret) {
Line 1269... Line 1472...
1269
 
1472
err_unref:
1270
	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1473
			drm_gem_object_unreference(&obj->base);
1271
 
-
 
1272
	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
-
 
1273
	if (ret != 0) {
-
 
1274
		goto err_unref;
-
 
1275
	}
-
 
1276
 
1474
			return ret;
Line 1277... Line 1475...
1277
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1475
		}
1278
    ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW|0x100);
1476
 
Line 1279... Line 1477...
1279
	if (ring->status_page.page_addr == NULL) {
1477
		ring->status_page.obj = obj;
1280
		ret = -ENOMEM;
-
 
1281
		goto err_unpin;
-
 
1282
	}
-
 
1283
	ring->status_page.obj = obj;
-
 
1284
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
-
 
1285
 
-
 
1286
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
-
 
1287
			ring->name, ring->status_page.gfx_addr);
1478
	}
Line 1288... Line 1479...
1288
 
1479
 
1289
	return 0;
1480
	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
1290
 
1481
    ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW|0x100);
Line 1291... Line 1482...
1291
err_unpin:
1482
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1292
	i915_gem_object_unpin(obj);
1483
 
Line 1311... Line 1502...
1311
    memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1502
    memset(ring->status_page.page_addr, 0, PAGE_SIZE);
Line 1312... Line 1503...
1312
 
1503
 
1313
    return 0;
1504
    return 0;
Line 1314... Line 1505...
1314
}
1505
}
1315
 
-
 
1316
static int intel_init_ring_buffer(struct drm_device *dev,
1506
 
1317
			   struct intel_ring_buffer *ring)
1507
static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1318
{
-
 
1319
	struct drm_i915_gem_object *obj;
1508
{
Line 1320... Line 1509...
1320
	struct drm_i915_private *dev_priv = dev->dev_private;
1509
	if (!ringbuf->obj)
1321
	int ret;
1510
		return;
1322
 
1511
 
1323
	ring->dev = dev;
1512
	iounmap(ringbuf->virtual_start);
1324
	INIT_LIST_HEAD(&ring->active_list);
-
 
-
 
1513
	i915_gem_object_ggtt_unpin(ringbuf->obj);
Line -... Line 1514...
-
 
1514
	drm_gem_object_unreference(&ringbuf->obj->base);
1325
	INIT_LIST_HEAD(&ring->request_list);
1515
	ringbuf->obj = NULL;
-
 
1516
}
-
 
1517
 
-
 
1518
static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-
 
1519
				      struct intel_ringbuffer *ringbuf)
Line 1326... Line -...
1326
	ring->size = 32 * PAGE_SIZE;
-
 
1327
	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
-
 
1328
 
-
 
1329
	init_waitqueue_head(&ring->irq_queue);
-
 
1330
 
-
 
1331
	if (I915_NEED_GFX_HWS(dev)) {
-
 
1332
       ret = init_status_page(ring);
-
 
1333
       if (ret)
1520
{
1334
           return ret;
1521
	struct drm_i915_private *dev_priv = to_i915(dev);
1335
	} else {
-
 
Line 1336... Line 1522...
1336
		BUG_ON(ring->id != RCS);
1522
	struct drm_i915_gem_object *obj;
1337
		ret = init_phys_status_page(ring);
1523
	int ret;
1338
		if (ret)
1524
 
1339
			return ret;
1525
	if (ringbuf->obj)
1340
	}
1526
		return 0;
1341
 
1527
 
1342
	obj = NULL;
-
 
1343
	if (!HAS_LLC(dev))
1528
	obj = NULL;
1344
		obj = i915_gem_object_create_stolen(dev, ring->size);
-
 
1345
	if (obj == NULL)
-
 
Line -... Line 1529...
-
 
1529
	if (!HAS_LLC(dev))
1346
        obj = i915_gem_alloc_object(dev, ring->size);
1530
		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
Line 1347... Line 1531...
1347
	if (obj == NULL) {
1531
	if (obj == NULL)
1348
		DRM_ERROR("Failed to allocate ringbuffer\n");
1532
		obj = i915_gem_alloc_object(dev, ringbuf->size);
1349
		ret = -ENOMEM;
1533
	if (obj == NULL)
Line 1350... Line 1534...
1350
		goto err_hws;
1534
		return -ENOMEM;
1351
	}
1535
 
1352
 
1536
	/* mark ring buffers as read-only from GPU side by default */
Line 1353... Line 1537...
1353
	ring->obj = obj;
1537
	obj->gt_ro = 1;
1354
 
1538
 
1355
	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
1539
	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1356
	if (ret)
1540
	if (ret)
1357
		goto err_unref;
-
 
1358
 
1541
		goto err_unref;
1359
	ret = i915_gem_object_set_to_gtt_domain(obj, true);
1542
 
1360
	if (ret)
1543
	ret = i915_gem_object_set_to_gtt_domain(obj, true);
Line -... Line 1544...
-
 
1544
	if (ret)
-
 
1545
		goto err_unpin;
-
 
1546
 
-
 
1547
	ringbuf->virtual_start =
-
 
1548
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-
 
1549
				ringbuf->size);
-
 
1550
	if (ringbuf->virtual_start == NULL) {
-
 
1551
		ret = -EINVAL;
-
 
1552
		goto err_unpin;
-
 
1553
	}
-
 
1554
 
-
 
1555
	ringbuf->obj = obj;
-
 
1556
	return 0;
-
 
1557
 
-
 
1558
err_unpin:
-
 
1559
	i915_gem_object_ggtt_unpin(obj);
-
 
1560
err_unref:
-
 
1561
	drm_gem_object_unreference(&obj->base);
-
 
1562
	return ret;
-
 
1563
}
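A minimal usage sketch of the pair of helpers above, assuming a ring whose ringbuf->size has already been chosen by intel_init_ring_buffer() further down:

        ret = intel_alloc_ringbuffer_obj(dev, ringbuf);  /* pin, set GTT domain, ioremap */
        if (ret)
                return ret;
        /* ... write commands through ringbuf->virtual_start ... */
        intel_destroy_ringbuffer_obj(ringbuf);           /* iounmap, unpin, unreference */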
-
 
1564
 
-
 
1565
static int intel_init_ring_buffer(struct drm_device *dev,
-
 
1566
				  struct intel_engine_cs *ring)
-
 
1567
{
-
 
1568
	struct intel_ringbuffer *ringbuf = ring->buffer;
-
 
1569
	int ret;
-
 
1570
 
-
 
1571
	if (ringbuf == NULL) {
-
 
1572
		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-
 
1573
		if (!ringbuf)
-
 
1574
			return -ENOMEM;
-
 
1575
		ring->buffer = ringbuf;
1361
		goto err_unpin;
1576
	}
-
 
1577
 
-
 
1578
	ring->dev = dev;
-
 
1579
	INIT_LIST_HEAD(&ring->active_list);
-
 
1580
	INIT_LIST_HEAD(&ring->request_list);
-
 
1581
	ringbuf->size = 32 * PAGE_SIZE;
1362
 
1582
	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
-
 
1583
 
-
 
1584
	init_waitqueue_head(&ring->irq_queue);
-
 
1585
 
-
 
1586
	if (I915_NEED_GFX_HWS(dev)) {
-
 
1587
		ret = init_status_page(ring);
-
 
1588
		if (ret)
1363
	ring->virtual_start =
1589
			goto error;
-
 
1590
	} else {
Line 1364... Line 1591...
1364
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1591
		BUG_ON(ring->id != RCS);
1365
			   ring->size);
1592
		ret = init_phys_status_page(ring);
1366
	if (ring->virtual_start == NULL) {
1593
	if (ret)
1367
		DRM_ERROR("Failed to map ringbuffer.\n");
1594
			goto error;
1368
		ret = -EINVAL;
1595
	}
1369
		goto err_unpin;
1596
 
1370
	}
1597
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-
 
1598
	if (ret) {
-
 
1599
		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
-
 
1600
		goto error;
-
 
1601
	}
-
 
1602
 
-
 
1603
	/* Workaround an erratum on the i830 which causes a hang if
-
 
1604
	 * the TAIL pointer points to within the last 2 cachelines
-
 
1605
	 * of the buffer.
Line 1371... Line 1606...
1371
 
1606
	 */
Line 1372... Line 1607...
1372
	ret = ring->init(ring);
1607
	ringbuf->effective_size = ringbuf->size;
1373
	if (ret)
1608
	if (IS_I830(dev) || IS_845G(dev))
1374
		goto err_unmap;
-
 
1375
 
-
 
1376
	/* Workaround an erratum on the i830 which causes a hang if
-
 
1377
	 * the TAIL pointer points to within the last 2 cachelines
-
 
1378
	 * of the buffer.
1609
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
1379
	 */
-
 
1380
	ring->effective_size = ring->size;
-
 
1381
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
1610
 
1382
		ring->effective_size -= 128;
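A worked illustration of the erratum guard above, with made-up numbers: the default ring is 32 * 4096 = 131072 bytes, and on i830/845G the last 2 * 64 = 128 bytes are fenced off, so effective_size becomes 130944.

        /* e.g. tail == 130900 and a request needs 60 bytes:
         * 130900 + 60 == 130960 > 130944, so __intel_ring_prepare() (later in
         * this file) wraps to offset 0 even though the real end is 131072. */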
1611
	ret = i915_cmd_parser_init_ring(ring);
Line 1383... Line 1612...
1383
 
1612
	if (ret)
1384
	return 0;
1613
		goto error;
1385
 
1614
 
1386
err_unmap:
1615
	ret = ring->init(ring);
Line 1387... Line 1616...
1387
	iounmap(ring->virtual_start);
1616
	if (ret)
1388
err_unpin:
1617
		goto error;
Line 1389... Line -...
1389
	i915_gem_object_unpin(obj);
-
 
1390
err_unref:
-
 
1391
	drm_gem_object_unreference(&obj->base);
1618
 
1392
	ring->obj = NULL;
-
 
1393
err_hws:
1619
	return 0;
1394
//   cleanup_status_page(ring);
-
 
1395
	return ret;
-
 
1396
}
-
 
1397
 
-
 
1398
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
-
 
Line 1399... Line -...
1399
{
-
 
1400
	struct drm_i915_private *dev_priv;
1620
 
1401
	int ret;
-
 
1402
 
1621
error:
1403
	if (ring->obj == NULL)
1622
	kfree(ringbuf);
Line 1404... Line 1623...
1404
		return;
1623
	ring->buffer = NULL;
1405
 
1624
	return ret;
Line 1406... Line 1625...
1406
	/* Disable the ring buffer. The ring must be idle at this point */
1625
}
1407
	dev_priv = ring->dev->dev_private;
-
 
Line 1408... Line 1626...
1408
	ret = intel_ring_idle(ring);
1626
 
1409
	if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
-
 
1410
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-
 
Line 1411... Line -...
1411
			  ring->name, ret);
-
 
1412
 
1627
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1413
	I915_WRITE_CTL(ring, 0);
-
 
1414
 
-
 
1415
	iounmap(ring->virtual_start);
1628
{
1416
 
1629
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
Line 1417... Line 1630...
1417
    i915_gem_object_unpin(ring->obj);
1630
	struct intel_ringbuffer *ringbuf = ring->buffer;
1418
	drm_gem_object_unreference(&ring->obj->base);
1631
 
-
 
1632
	if (!intel_ring_initialized(ring))
1419
	ring->obj = NULL;
1633
		return;
1420
	ring->preallocated_lazy_request = NULL;
1634
 
1421
	ring->outstanding_lazy_seqno = 0;
1635
	intel_stop_ring_buffer(ring);
Line -... Line 1636...
-
 
1636
	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
-
 
1637
 
1422
 
1638
	intel_destroy_ringbuffer_obj(ringbuf);
Line 1423... Line -...
1423
	if (ring->cleanup)
-
 
1424
		ring->cleanup(ring);
-
 
1425
 
-
 
1426
//   cleanup_status_page(ring);
1639
	ring->preallocated_lazy_request = NULL;
1427
}
1640
	ring->outstanding_lazy_seqno = 0;
1428
 
1641
 
1429
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1642
	if (ring->cleanup)
Line 1430... Line 1643...
1430
{
1643
		ring->cleanup(ring);
1431
	int ret;
-
 
1432
 
-
 
1433
	ret = i915_wait_seqno(ring, seqno);
-
 
1434
	if (!ret)
-
 
1435
		i915_gem_retire_requests_ring(ring);
-
 
1436
 
1644
 
1437
	return ret;
-
 
1438
}
-
 
1439
 
-
 
1440
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1645
//	cleanup_status_page(ring);
1441
{
1646
 
1442
	struct drm_i915_gem_request *request;
1647
	i915_cmd_parser_fini_ring(ring);
1443
	u32 seqno = 0;
-
 
1444
	int ret;
-
 
1445
 
-
 
1446
	i915_gem_retire_requests_ring(ring);
-
 
1447
 
-
 
1448
	if (ring->last_retired_head != -1) {
-
 
1449
		ring->head = ring->last_retired_head;
-
 
1450
		ring->last_retired_head = -1;
1648
 
Line 1451... Line 1649...
1451
		ring->space = ring_space(ring);
1649
	kfree(ringbuf);
1452
		if (ring->space >= n)
1650
	ring->buffer = NULL;
Line 1453... Line 1651...
1453
			return 0;
1651
}
1454
	}
1652
 
1455
 
1653
static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
Line 1456... Line 1654...
1456
	list_for_each_entry(request, &ring->request_list, list) {
1654
{
1457
		int space;
-
 
1458
 
-
 
1459
		if (request->tail == -1)
1655
	struct intel_ringbuffer *ringbuf = ring->buffer;
1460
			continue;
1656
	struct drm_i915_gem_request *request;
1461
 
-
 
1462
		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
-
 
1463
		if (space < 0)
-
 
Line -... Line 1657...
-
 
1657
	u32 seqno = 0;
1464
			space += ring->size;
1658
	int ret;
1465
		if (space >= n) {
1659
 
Line 1466... Line 1660...
1466
			seqno = request->seqno;
1660
	if (ringbuf->last_retired_head != -1) {
1467
			break;
1661
		ringbuf->head = ringbuf->last_retired_head;
1468
		}
1662
		ringbuf->last_retired_head = -1;
1469
 
1663
 
-
 
1664
		ringbuf->space = ring_space(ringbuf);
1470
		/* Consume this request in case we need more space than
1665
		if (ringbuf->space >= n)
1471
		 * is available and so need to prevent a race between
1666
			return 0;
Line 1472... Line 1667...
1472
		 * updating last_retired_head and direct reads of
1667
	}
1473
		 * I915_RING_HEAD. It also provides a nice sanity check.
1668
 
1474
		 */
1669
	list_for_each_entry(request, &ring->request_list, list) {
Line 1475... Line 1670...
1475
		request->tail = -1;
1670
		if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
1476
	}
1671
			seqno = request->seqno;
Line 1477... Line -...
1477
 
-
 
1478
	if (seqno == 0)
1672
			break;
1479
		return -ENOSPC;
1673
		}
1480
 
1674
	}
1481
	ret = intel_ring_wait_seqno(ring, seqno);
1675
 
1482
	if (ret)
1676
	if (seqno == 0)
1483
		return ret;
1677
		return -ENOSPC;
Line -... Line 1678...
-
 
1678
 
1484
 
1679
	ret = i915_wait_seqno(ring, seqno);
1485
	if (WARN_ON(ring->last_retired_head == -1))
1680
	if (ret)
1486
		return -ENOSPC;
1681
		return ret;
1487
 
1682
 
1488
	ring->head = ring->last_retired_head;
1683
	i915_gem_retire_requests_ring(ring);
1489
	ring->last_retired_head = -1;
1684
	ringbuf->head = ringbuf->last_retired_head;
1490
	ring->space = ring_space(ring);
1685
	ringbuf->last_retired_head = -1;
Line -... Line 1686...
-
 
1686
 
1491
	if (WARN_ON(ring->space < n))
1687
	ringbuf->space = ring_space(ringbuf);
Line 1492... Line 1688...
1492
		return -ENOSPC;
1688
	return 0;
1493
 
1689
}
1494
	return 0;
1690
 
1495
}
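Purely illustrative numbers for the per-request space check above (I915_RING_FREE_SPACE is the 64-byte safety margin): with a 131072-byte ring, the tail at 130000 and a pending request whose recorded tail is 4096, retiring that request yields 4096 - (130000 + 64) = -125968, which wraps to -125968 + 131072 = 5104 free bytes.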
1691
static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
-
 
1692
{
1496
 
1693
	struct drm_device *dev = ring->dev;
-
 
1694
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1695
	struct intel_ringbuffer *ringbuf = ring->buffer;
-
 
1696
	unsigned long end;
-
 
1697
	int ret;
1497
static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1698
 
1498
{
1699
	ret = intel_ring_wait_request(ring, n);
1499
	struct drm_device *dev = ring->dev;
1700
	if (ret != -ENOSPC)
Line 1500... Line 1701...
1500
	struct drm_i915_private *dev_priv = dev->dev_private;
1701
		return ret;
1501
	unsigned long end;
1702
 
1502
	int ret;
1703
	/* force the tail write in case we have been skipping them */
-
 
1704
	__intel_ring_advance(ring);
1503
 
1705
 
Line 1504... Line 1706...
1504
	ret = intel_ring_wait_request(ring, n);
1706
	/* With GEM the hangcheck timer should kick us out of the loop,
1505
	if (ret != -ENOSPC)
1707
	 * leaving it early runs the risk of corrupting GEM state (due
1506
		return ret;
1708
	 * to running on almost untested codepaths). But on resume
1507
 
1709
	 * timers don't work yet, so prevent a complete hang in that
1508
	/* force the tail write in case we have been skipping them */
1710
	 * case by choosing an insanely large timeout. */
Line 1509... Line 1711...
1509
	__intel_ring_advance(ring);
1711
	end = jiffies + 60 * HZ;
1510
 
1712
 
1511
	trace_i915_ring_wait_begin(ring);
1713
	trace_i915_ring_wait_begin(ring);
1512
	/* With GEM the hangcheck timer should kick us out of the loop,
1714
	do {
Line 1513... Line 1715...
1513
	 * leaving it early runs the risk of corrupting GEM state (due
1715
		ringbuf->head = I915_READ_HEAD(ring);
1514
	 * to running on almost untested codepaths). But on resume
1716
		ringbuf->space = ring_space(ringbuf);
Line 1515... Line 1717...
1515
	 * timers don't work yet, so prevent a complete hang in that
1717
		if (ringbuf->space >= n) {
1516
	 * case by choosing an insanely large timeout. */
1718
			ret = 0;
Line 1517... Line 1719...
1517
	end = GetTimerTicks() + 60 * HZ;
1719
			break;
1518
 
1720
		}
1519
	do {
1721
 
1520
		ring->head = I915_READ_HEAD(ring);
1722
 
Line 1521... Line 1723...
1521
		ring->space = ring_space(ring);
1723
		msleep(1);
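A compact sketch of the bounded poll the fragments above form once both columns are read together (illustrative only, not the elided remainder of this hunk):

        end = jiffies + 60 * HZ;                        /* GetTimerTicks() in the 4560 build */
        do {
                ringbuf->head = I915_READ_HEAD(ring);   /* resample the hardware HEAD */
                ringbuf->space = ring_space(ringbuf);
                if (ringbuf->space >= n)
                        break;                          /* enough room has drained */
                msleep(1);
        } while (!time_after(jiffies, end));            /* give up after roughly 60 s */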
Line 1579... Line 1781...
1579
 
1781
 
1580
	return i915_wait_seqno(ring, seqno);
1782
	return i915_wait_seqno(ring, seqno);
Line 1581... Line 1783...
1581
}
1783
}
1582
 
1784
 
1583
static int
1785
static int
1584
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1786
intel_ring_alloc_seqno(struct intel_engine_cs *ring)
1585
{
1787
{
Line 1586... Line 1788...
1586
	if (ring->outstanding_lazy_seqno)
1788
	if (ring->outstanding_lazy_seqno)
Line 1597... Line 1799...
1597
	}
1799
	}
Line 1598... Line 1800...
1598
 
1800
 
1599
	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
1801
	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
Line 1600... Line 1802...
1600
}
1802
}
1601
 
1803
 
1602
static int __intel_ring_prepare(struct intel_ring_buffer *ring,
1804
static int __intel_ring_prepare(struct intel_engine_cs *ring,
-
 
1805
			      int bytes)
1603
			      int bytes)
1806
{
Line 1604... Line 1807...
1604
{
1807
	struct intel_ringbuffer *ringbuf = ring->buffer;
1605
	int ret;
1808
	int ret;
1606
 
1809
 
1607
	if (unlikely(ring->tail + bytes > ring->effective_size)) {
1810
	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
1608
		ret = intel_wrap_ring_buffer(ring);
1811
		ret = intel_wrap_ring_buffer(ring);
Line 1609... Line 1812...
1609
		if (unlikely(ret))
1812
		if (unlikely(ret))
1610
			return ret;
1813
			return ret;
1611
	}
1814
	}
1612
 
1815
 
1613
	if (unlikely(ring->space < bytes)) {
1816
	if (unlikely(ringbuf->space < bytes)) {
Line 1614... Line 1817...
1614
		ret = ring_wait_for_space(ring, bytes);
1817
		ret = ring_wait_for_space(ring, bytes);
1615
		if (unlikely(ret))
1818
		if (unlikely(ret))
Line 1616... Line 1819...
1616
			return ret;
1819
			return ret;
1617
	}
1820
	}
1618
 
1821
 
1619
	return 0;
1822
	return 0;
1620
}
1823
}
Line 1621... Line 1824...
1621
 
1824
 
1622
int intel_ring_begin(struct intel_ring_buffer *ring,
1825
int intel_ring_begin(struct intel_engine_cs *ring,
1623
		     int num_dwords)
1826
		     int num_dwords)
Line 1637... Line 1840...
1637
	/* Preallocate the olr before touching the ring */
1840
	/* Preallocate the olr before touching the ring */
1638
	ret = intel_ring_alloc_seqno(ring);
1841
	ret = intel_ring_alloc_seqno(ring);
1639
	if (ret)
1842
	if (ret)
1640
		return ret;
1843
		return ret;
Line 1641... Line 1844...
1641
 
1844
 
1642
	ring->space -= num_dwords * sizeof(uint32_t);
1845
	ring->buffer->space -= num_dwords * sizeof(uint32_t);
1643
	return 0;
1846
	return 0;
Line -... Line 1847...
-
 
1847
}
1644
}
1848
 
1645
 
1849
/* Align the ring tail to a cacheline boundary */
-
 
1850
int intel_ring_cacheline_align(struct intel_engine_cs *ring)
-
 
1851
{
-
 
1852
	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
-
 
1853
	int ret;
-
 
1854
 
-
 
1855
	if (num_dwords == 0)
-
 
1856
		return 0;
-
 
1857
 
-
 
1858
	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-
 
1859
	ret = intel_ring_begin(ring, num_dwords);
-
 
1860
	if (ret)
-
 
1861
		return ret;
-
 
1862
 
-
 
1863
	while (num_dwords--)
-
 
1864
		intel_ring_emit(ring, MI_NOOP);
-
 
1865
 
-
 
1866
	intel_ring_advance(ring);
-
 
1867
 
-
 
1868
	return 0;
-
 
1869
}
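A hedged sketch of how callers combine the helpers above; the two-dword packet is arbitrary filler:

        ret = intel_ring_cacheline_align(ring); /* pad with MI_NOOPs to a 64-byte boundary */
        if (ret == 0)
                ret = intel_ring_begin(ring, 2); /* reserve space, waiting or wrapping as needed */
        if (ret)
                return ret;
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);       /* done emitting; the tail reaches hardware later
                                         * via __intel_ring_advance() */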
-
 
1870
 
-
 
1871
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
1646
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1872
{
Line 1647... Line 1873...
1647
{
1873
	struct drm_device *dev = ring->dev;
Line 1648... Line 1874...
1648
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1874
	struct drm_i915_private *dev_priv = dev->dev_private;
1649
 
1875
 
1650
	BUG_ON(ring->outstanding_lazy_seqno);
1876
	BUG_ON(ring->outstanding_lazy_seqno);
1651
 
1877
 
1652
	if (INTEL_INFO(ring->dev)->gen >= 6) {
1878
	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
1653
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1879
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
Line 1654... Line 1880...
1654
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1880
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1655
		if (HAS_VEBOX(ring->dev))
1881
		if (HAS_VEBOX(dev))
1656
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
1882
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
Line 1657... Line 1883...
1657
	}
1883
	}
1658
 
1884
 
1659
	ring->set_seqno(ring, seqno);
1885
	ring->set_seqno(ring, seqno);
1660
	ring->hangcheck.seqno = seqno;
1886
	ring->hangcheck.seqno = seqno;
Line 1661... Line 1887...
1661
}
1887
}
Line 1662... Line 1888...
1662
 
1888
 
1663
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1889
static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
Line 1691... Line 1917...
1691
	 */
1917
	 */
1692
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1918
       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1693
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1919
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1694
}
1920
}
Line 1695... Line 1921...
1695
 
1921
 
1696
static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
1922
static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
1697
			   u32 invalidate, u32 flush)
1923
			   u32 invalidate, u32 flush)
1698
{
1924
{
1699
	uint32_t cmd;
1925
	uint32_t cmd;
Line 1727... Line 1953...
1727
	intel_ring_advance(ring);
1953
	intel_ring_advance(ring);
1728
	return 0;
1954
	return 0;
1729
}
1955
}
Line 1730... Line 1956...
1730
 
1956
 
1731
static int
1957
static int
1732
gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1958
gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1733
			      u32 offset, u32 len,
1959
			      u64 offset, u32 len,
1734
			      unsigned flags)
1960
			      unsigned flags)
1735
{
1961
{
1736
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1962
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1737
	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
1963
	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
Line 1742... Line 1968...
1742
	if (ret)
1968
	if (ret)
1743
		return ret;
1969
		return ret;
Line 1744... Line 1970...
1744
 
1970
 
1745
	/* FIXME(BDW): Address space and security selectors. */
1971
	/* FIXME(BDW): Address space and security selectors. */
1746
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
1972
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
1747
	intel_ring_emit(ring, offset);
1973
	intel_ring_emit(ring, lower_32_bits(offset));
1748
	intel_ring_emit(ring, 0);
1974
	intel_ring_emit(ring, upper_32_bits(offset));
1749
	intel_ring_emit(ring, MI_NOOP);
1975
	intel_ring_emit(ring, MI_NOOP);
Line 1750... Line 1976...
1750
	intel_ring_advance(ring);
1976
	intel_ring_advance(ring);
1751
 
1977
 
Line 1752... Line 1978...
1752
	return 0;
1978
	return 0;
1753
}
1979
}
1754
 
1980
 
1755
static int
1981
static int
1756
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1982
hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1757
			      u32 offset, u32 len,
1983
			      u64 offset, u32 len,
Line 1758... Line 1984...
1758
			      unsigned flags)
1984
			      unsigned flags)
Line 1772... Line 1998...
1772
 
1998
 
1773
	return 0;
1999
	return 0;
Line 1774... Line 2000...
1774
}
2000
}
1775
 
2001
 
1776
static int
2002
static int
1777
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
2003
gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1778
			      u32 offset, u32 len,
2004
			      u64 offset, u32 len,
1779
			      unsigned flags)
2005
			      unsigned flags)
Line 1780... Line 2006...
1780
{
2006
{
Line 1794... Line 2020...
1794
       return 0;
2020
       return 0;
1795
}
2021
}
Line 1796... Line 2022...
1796
 
2022
 
Line 1797... Line 2023...
1797
/* Blitter support (SandyBridge+) */
2023
/* Blitter support (SandyBridge+) */
1798
 
2024
 
1799
static int gen6_ring_flush(struct intel_ring_buffer *ring,
2025
static int gen6_ring_flush(struct intel_engine_cs *ring,
1800
			  u32 invalidate, u32 flush)
2026
			  u32 invalidate, u32 flush)
1801
{
2027
{
1802
	struct drm_device *dev = ring->dev;
2028
	struct drm_device *dev = ring->dev;
Line 1836... Line 2062...
1836
	return 0;
2062
	return 0;
1837
}
2063
}
Line 1838... Line 2064...
1838
 
2064
 
1839
int intel_init_render_ring_buffer(struct drm_device *dev)
2065
int intel_init_render_ring_buffer(struct drm_device *dev)
1840
{
2066
{
1841
	drm_i915_private_t *dev_priv = dev->dev_private;
2067
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2068
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
-
 
2069
	struct drm_i915_gem_object *obj;
Line 1842... Line 2070...
1842
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
2070
	int ret;
1843
 
2071
 
1844
	ring->name = "render ring";
2072
	ring->name = "render ring";
Line 1845... Line -...
1845
	ring->id = RCS;
-
 
1846
	ring->mmio_base = RENDER_RING_BASE;
-
 
1847
 
-
 
1848
	if (INTEL_INFO(dev)->gen >= 6) {
-
 
1849
       ring->add_request = gen6_add_request;
-
 
1850
		ring->flush = gen7_render_ring_flush;
2073
	ring->id = RCS;
-
 
2074
	ring->mmio_base = RENDER_RING_BASE;
-
 
2075
 
-
 
2076
	if (INTEL_INFO(dev)->gen >= 8) {
-
 
2077
		if (i915_semaphore_is_enabled(dev)) {
-
 
2078
			obj = i915_gem_alloc_object(dev, 4096);
-
 
2079
			if (obj == NULL) {
-
 
2080
				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
-
 
2081
				i915.semaphores = 0;
-
 
2082
			} else {
-
 
2083
				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-
 
2084
				ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
-
 
2085
				if (ret != 0) {
-
 
2086
					drm_gem_object_unreference(&obj->base);
-
 
2087
					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
-
 
2088
					i915.semaphores = 0;
-
 
2089
				} else
-
 
2090
					dev_priv->semaphore_obj = obj;
1851
		if (INTEL_INFO(dev)->gen == 6)
2091
			}
1852
		ring->flush = gen6_render_ring_flush;
2092
		}
1853
		if (INTEL_INFO(dev)->gen >= 8) {
2093
		ring->add_request = gen6_add_request;
-
 
2094
		ring->flush = gen8_render_ring_flush;
-
 
2095
		ring->irq_get = gen8_ring_get_irq;
-
 
2096
		ring->irq_put = gen8_ring_put_irq;
-
 
2097
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-
 
2098
		ring->get_seqno = gen6_ring_get_seqno;
-
 
2099
		ring->set_seqno = ring_set_seqno;
-
 
2100
		if (i915_semaphore_is_enabled(dev)) {
-
 
2101
			WARN_ON(!dev_priv->semaphore_obj);
1854
			ring->flush = gen8_render_ring_flush;
2102
			ring->semaphore.sync_to = gen8_ring_sync;
-
 
2103
			ring->semaphore.signal = gen8_rcs_signal;
-
 
2104
			GEN8_RING_SEMAPHORE_INIT;
-
 
2105
		}
-
 
2106
	} else if (INTEL_INFO(dev)->gen >= 6) {
-
 
2107
       ring->add_request = gen6_add_request;
1855
			ring->irq_get = gen8_ring_get_irq;
2108
		ring->flush = gen7_render_ring_flush;
1856
			ring->irq_put = gen8_ring_put_irq;
2109
		if (INTEL_INFO(dev)->gen == 6)
1857
		} else {
-
 
1858
		ring->irq_get = gen6_ring_get_irq;
2110
		ring->flush = gen6_render_ring_flush;
1859
		ring->irq_put = gen6_ring_put_irq;
2111
		ring->irq_get = gen6_ring_get_irq;
1860
		}
2112
		ring->irq_put = gen6_ring_put_irq;
-
 
2113
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1861
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2114
		ring->get_seqno = gen6_ring_get_seqno;
-
 
2115
		ring->set_seqno = ring_set_seqno;
-
 
2116
		if (i915_semaphore_is_enabled(dev)) {
-
 
2117
		ring->semaphore.sync_to = gen6_ring_sync;
-
 
2118
		ring->semaphore.signal = gen6_signal;
-
 
2119
		/*
-
 
2120
			 * The current semaphore is only applied on pre-gen8
-
 
2121
			 * platform.  And there is no VCS2 ring on the pre-gen8
-
 
2122
			 * platform. So the semaphore between RCS and VCS2 is
1862
		ring->get_seqno = gen6_ring_get_seqno;
2123
			 * initialized as INVALID.  Gen8 will initialize the
1863
		ring->set_seqno = ring_set_seqno;
2124
			 * sema between VCS2 and RCS later.
1864
		ring->sync_to = gen6_ring_sync;
2125
		 */
1865
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2126
		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-
 
2127
		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
1866
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
2128
		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
1867
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
2129
		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
1868
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
2130
		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
1869
		ring->signal_mbox[RCS] = GEN6_NOSYNC;
2131
		ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-
 
2132
		ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
-
 
2133
		ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
1870
		ring->signal_mbox[VCS] = GEN6_VRSYNC;
2134
		ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
1871
		ring->signal_mbox[BCS] = GEN6_BRSYNC;
2135
		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
1872
		ring->signal_mbox[VECS] = GEN6_VERSYNC;
2136
		}
1873
	} else if (IS_GEN5(dev)) {
2137
	} else if (IS_GEN5(dev)) {
1874
       ring->add_request = pc_render_add_request;
2138
       ring->add_request = pc_render_add_request;
Line 1895... Line 2159...
1895
			ring->irq_put = i9xx_ring_put_irq;
2159
			ring->irq_put = i9xx_ring_put_irq;
1896
		}
2160
		}
1897
		ring->irq_enable_mask = I915_USER_INTERRUPT;
2161
		ring->irq_enable_mask = I915_USER_INTERRUPT;
1898
	}
2162
	}
1899
	ring->write_tail = ring_write_tail;
2163
	ring->write_tail = ring_write_tail;
-
 
2164
 
1900
	if (IS_HASWELL(dev))
2165
	if (IS_HASWELL(dev))
1901
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2166
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1902
	else if (IS_GEN8(dev))
2167
	else if (IS_GEN8(dev))
1903
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2168
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
1904
	else if (INTEL_INFO(dev)->gen >= 6)
2169
	else if (INTEL_INFO(dev)->gen >= 6)
Line 1912... Line 2177...
1912
	ring->init = init_render_ring;
2177
	ring->init = init_render_ring;
1913
	ring->cleanup = render_ring_cleanup;
2178
	ring->cleanup = render_ring_cleanup;
Line 1914... Line 2179...
1914
 
2179
 
1915
	/* Workaround batchbuffer to combat CS tlb bug. */
2180
	/* Workaround batchbuffer to combat CS tlb bug. */
1916
	if (HAS_BROKEN_CS_TLB(dev)) {
-
 
1917
		struct drm_i915_gem_object *obj;
-
 
1918
		int ret;
-
 
1919
 
2181
	if (HAS_BROKEN_CS_TLB(dev)) {
1920
		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
2182
		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
1921
		if (obj == NULL) {
2183
		if (obj == NULL) {
1922
			DRM_ERROR("Failed to allocate batch bo\n");
2184
			DRM_ERROR("Failed to allocate batch bo\n");
1923
			return -ENOMEM;
2185
			return -ENOMEM;
Line 1924... Line 2186...
1924
		}
2186
		}
1925
 
2187
 
1926
		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
2188
		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
1927
		if (ret != 0) {
2189
		if (ret != 0) {
1928
			drm_gem_object_unreference(&obj->base);
2190
			drm_gem_object_unreference(&obj->base);
1929
			DRM_ERROR("Failed to ping batch bo\n");
2191
			DRM_ERROR("Failed to ping batch bo\n");
Line 1938... Line 2200...
1938
}
2200
}
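A hedged reading of the semaphore setup above, based on gen6_signal/gen6_ring_sync elsewhere in this file rather than on this hunk alone:

        /* gen6/7: render signals the video engine by writing its seqno to
         * GEN6_VRSYNC, and the video engine, when it waits on render, emits the
         * matching MI_SEMAPHORE_SYNC_RV value; the VCS2 slots stay GEN6_NOSYNC /
         * MI_SEMAPHORE_SYNC_INVALID because the second video ring is gen8-only.
         * gen8: the mailbox table is replaced by the pinned 4 KiB semaphore_obj
         * allocated near the top of this function. */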
Line 1939... Line 2201...
1939
 
2201
 
1940
#if 0
2202
#if 0
1941
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
2203
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1942
{
2204
{
1943
	drm_i915_private_t *dev_priv = dev->dev_private;
2205
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2206
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
1944
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
2207
	struct intel_ringbuffer *ringbuf = ring->buffer;
Line -... Line 2208...
-
 
2208
	int ret;
-
 
2209
 
-
 
2210
	if (ringbuf == NULL) {
-
 
2211
		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-
 
2212
		if (!ringbuf)
-
 
2213
			return -ENOMEM;
-
 
2214
		ring->buffer = ringbuf;
1945
	int ret;
2215
	}
1946
 
2216
 
1947
	ring->name = "render ring";
2217
	ring->name = "render ring";
Line 1948... Line 2218...
1948
	ring->id = RCS;
2218
	ring->id = RCS;
1949
	ring->mmio_base = RENDER_RING_BASE;
2219
	ring->mmio_base = RENDER_RING_BASE;
1950
 
2220
 
-
 
2221
	if (INTEL_INFO(dev)->gen >= 6) {
1951
	if (INTEL_INFO(dev)->gen >= 6) {
2222
		/* non-kms not supported on gen6+ */
Line 1952... Line 2223...
1952
		/* non-kms not supported on gen6+ */
2223
		ret = -ENODEV;
1953
		return -ENODEV;
2224
		goto err_ringbuf;
1954
	}
2225
	}
Line 1983... Line 2254...
1983
 
2254
 
1984
	ring->dev = dev;
2255
	ring->dev = dev;
1985
	INIT_LIST_HEAD(&ring->active_list);
2256
	INIT_LIST_HEAD(&ring->active_list);
Line 1986... Line 2257...
1986
	INIT_LIST_HEAD(&ring->request_list);
2257
	INIT_LIST_HEAD(&ring->request_list);
1987
 
2258
 
1988
	ring->size = size;
2259
	ringbuf->size = size;
1989
	ring->effective_size = ring->size;
2260
	ringbuf->effective_size = ringbuf->size;
Line 1990... Line 2261...
1990
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
2261
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
1991
		ring->effective_size -= 128;
2262
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
1992
 
2263
 
1993
	ring->virtual_start = ioremap_wc(start, size);
2264
	ringbuf->virtual_start = ioremap_wc(start, size);
1994
	if (ring->virtual_start == NULL) {
2265
	if (ringbuf->virtual_start == NULL) {
-
 
2266
		DRM_ERROR("can not ioremap virtual address for"
1995
		DRM_ERROR("can not ioremap virtual address for"
2267
			  " ring buffer\n");
Line 1996... Line 2268...
1996
			  " ring buffer\n");
2268
		ret = -ENOMEM;
1997
		return -ENOMEM;
2269
		goto err_ringbuf;
1998
	}
2270
	}
1999
 
2271
 
2000
	if (!I915_NEED_GFX_HWS(dev)) {
2272
	if (!I915_NEED_GFX_HWS(dev)) {
Line 2001... Line 2273...
2001
		ret = init_phys_status_page(ring);
2273
		ret = init_phys_status_page(ring);
-
 
2274
		if (ret)
-
 
2275
			goto err_vstart;
-
 
2276
	}
-
 
2277
 
-
 
2278
	return 0;
-
 
2279
 
-
 
2280
err_vstart:
2002
		if (ret)
2281
	iounmap(ringbuf->virtual_start);
2003
			return ret;
2282
err_ringbuf:
Line 2004... Line 2283...
2004
	}
2283
	kfree(ringbuf);
2005
 
2284
	ring->buffer = NULL;
2006
	return 0;
2285
	return ret;
2007
}
2286
}
Line 2008... Line 2287...
2008
#endif
2287
#endif
2009
 
2288
 
Line 2010... Line 2289...
2010
int intel_init_bsd_ring_buffer(struct drm_device *dev)
2289
int intel_init_bsd_ring_buffer(struct drm_device *dev)
Line 2030... Line 2309...
2030
				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2309
				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2031
			ring->irq_get = gen8_ring_get_irq;
2310
			ring->irq_get = gen8_ring_get_irq;
2032
			ring->irq_put = gen8_ring_put_irq;
2311
			ring->irq_put = gen8_ring_put_irq;
2033
			ring->dispatch_execbuffer =
2312
			ring->dispatch_execbuffer =
2034
				gen8_ring_dispatch_execbuffer;
2313
				gen8_ring_dispatch_execbuffer;
-
 
2314
			if (i915_semaphore_is_enabled(dev)) {
-
 
2315
				ring->semaphore.sync_to = gen8_ring_sync;
-
 
2316
				ring->semaphore.signal = gen8_xcs_signal;
-
 
2317
				GEN8_RING_SEMAPHORE_INIT;
-
 
2318
			}
2035
		} else {
2319
		} else {
2036
		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2320
		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2037
		ring->irq_get = gen6_ring_get_irq;
2321
		ring->irq_get = gen6_ring_get_irq;
2038
		ring->irq_put = gen6_ring_put_irq;
2322
		ring->irq_put = gen6_ring_put_irq;
2039
			ring->dispatch_execbuffer =
2323
			ring->dispatch_execbuffer =
2040
				gen6_ring_dispatch_execbuffer;
2324
				gen6_ring_dispatch_execbuffer;
-
 
2325
			if (i915_semaphore_is_enabled(dev)) {
-
 
2326
		ring->semaphore.sync_to = gen6_ring_sync;
-
 
2327
		ring->semaphore.signal = gen6_signal;
-
 
2328
		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
-
 
2329
		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-
 
2330
		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
-
 
2331
		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
-
 
2332
		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-
 
2333
		ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
-
 
2334
		ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-
 
2335
		ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
-
 
2336
		ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
-
 
2337
		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-
 
2338
			}
2041
		}
2339
		}
2042
		ring->sync_to = gen6_ring_sync;
-
 
2043
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
-
 
2044
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-
 
2045
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
-
 
2046
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
-
 
2047
		ring->signal_mbox[RCS] = GEN6_RVSYNC;
-
 
2048
		ring->signal_mbox[VCS] = GEN6_NOSYNC;
-
 
2049
		ring->signal_mbox[BCS] = GEN6_BVSYNC;
-
 
2050
		ring->signal_mbox[VECS] = GEN6_VEVSYNC;
-
 
2051
	} else {
2340
	} else {
2052
		ring->mmio_base = BSD_RING_BASE;
2341
		ring->mmio_base = BSD_RING_BASE;
2053
		ring->flush = bsd_ring_flush;
2342
		ring->flush = bsd_ring_flush;
2054
		ring->add_request = i9xx_add_request;
2343
		ring->add_request = i9xx_add_request;
2055
		ring->get_seqno = ring_get_seqno;
2344
		ring->get_seqno = ring_get_seqno;
Line 2068... Line 2357...
2068
	ring->init = init_ring_common;
2357
	ring->init = init_ring_common;
Line 2069... Line 2358...
2069
 
2358
 
2070
	return intel_init_ring_buffer(dev, ring);
2359
	return intel_init_ring_buffer(dev, ring);
Line -... Line 2360...
-
 
2360
}
-
 
2361
 
-
 
2362
/**
-
 
2363
 * Initialize the second BSD ring for Broadwell GT3.
-
 
2364
 * It is noted that this only exists on Broadwell GT3.
-
 
2365
 */
-
 
2366
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
-
 
2367
{
-
 
2368
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2369
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
-
 
2370
 
-
 
2371
	if ((INTEL_INFO(dev)->gen != 8)) {
-
 
2372
		DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
-
 
2373
		return -EINVAL;
-
 
2374
	}
-
 
2375
 
-
 
2376
	ring->name = "bsd2 ring";
-
 
2377
	ring->id = VCS2;
-
 
2378
 
-
 
2379
	ring->write_tail = ring_write_tail;
-
 
2380
	ring->mmio_base = GEN8_BSD2_RING_BASE;
-
 
2381
	ring->flush = gen6_bsd_ring_flush;
-
 
2382
	ring->add_request = gen6_add_request;
-
 
2383
	ring->get_seqno = gen6_ring_get_seqno;
-
 
2384
	ring->set_seqno = ring_set_seqno;
-
 
2385
	ring->irq_enable_mask =
-
 
2386
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-
 
2387
	ring->irq_get = gen8_ring_get_irq;
-
 
2388
	ring->irq_put = gen8_ring_put_irq;
-
 
2389
	ring->dispatch_execbuffer =
-
 
2390
			gen8_ring_dispatch_execbuffer;
-
 
2391
	if (i915_semaphore_is_enabled(dev)) {
-
 
2392
		ring->semaphore.sync_to = gen8_ring_sync;
-
 
2393
		ring->semaphore.signal = gen8_xcs_signal;
-
 
2394
		GEN8_RING_SEMAPHORE_INIT;
-
 
2395
	}
-
 
2396
	ring->init = init_ring_common;
-
 
2397
 
-
 
2398
	return intel_init_ring_buffer(dev, ring);
2071
}
2399
}
2072
 
2400
 
2073
int intel_init_blt_ring_buffer(struct drm_device *dev)
2401
int intel_init_blt_ring_buffer(struct drm_device *dev)
2074
{
2402
{
Line 2075... Line 2403...
2075
	drm_i915_private_t *dev_priv = dev->dev_private;
2403
	struct drm_i915_private *dev_priv = dev->dev_private;
2076
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
2404
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];
Line 2077... Line 2405...
2077
 
2405
 
Line 2088... Line 2416...
2088
		ring->irq_enable_mask =
2416
		ring->irq_enable_mask =
2089
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2417
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
2090
		ring->irq_get = gen8_ring_get_irq;
2418
		ring->irq_get = gen8_ring_get_irq;
2091
		ring->irq_put = gen8_ring_put_irq;
2419
		ring->irq_put = gen8_ring_put_irq;
2092
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2420
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-
 
2421
		if (i915_semaphore_is_enabled(dev)) {
-
 
2422
			ring->semaphore.sync_to = gen8_ring_sync;
-
 
2423
			ring->semaphore.signal = gen8_xcs_signal;
-
 
2424
			GEN8_RING_SEMAPHORE_INIT;
-
 
2425
		}
2093
	} else {
2426
	} else {
2094
	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2427
	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2095
	ring->irq_get = gen6_ring_get_irq;
2428
	ring->irq_get = gen6_ring_get_irq;
2096
	ring->irq_put = gen6_ring_put_irq;
2429
	ring->irq_put = gen6_ring_put_irq;
2097
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2430
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-
 
2431
		if (i915_semaphore_is_enabled(dev)) {
-
 
2432
			ring->semaphore.signal = gen6_signal;
-
 
2433
	ring->semaphore.sync_to = gen6_ring_sync;
-
 
2434
	/*
-
 
2435
			 * The current semaphore is only applied on pre-gen8
-
 
2436
			 * platform.  And there is no VCS2 ring on the pre-gen8
-
 
2437
			 * platform. So the semaphore between BCS and VCS2 is
-
 
2438
			 * initialized as INVALID.  Gen8 will initialize the
-
 
2439
			 * sema between BCS and VCS2 later.
-
 
2440
	 */
-
 
2441
	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
-
 
2442
	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
-
 
2443
	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-
 
2444
	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
-
 
2445
	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-
 
2446
	ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
-
 
2447
	ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
-
 
2448
	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-
 
2449
	ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
-
 
2450
	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-
 
2451
		}
2098
	}
2452
	}
2099
	ring->sync_to = gen6_ring_sync;
-
 
2100
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
-
 
2101
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
-
 
2102
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-
 
2103
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
-
 
2104
	ring->signal_mbox[RCS] = GEN6_RBSYNC;
-
 
2105
	ring->signal_mbox[VCS] = GEN6_VBSYNC;
-
 
2106
	ring->signal_mbox[BCS] = GEN6_NOSYNC;
-
 
2107
	ring->signal_mbox[VECS] = GEN6_VEBSYNC;
-
 
2108
	ring->init = init_ring_common;
2453
	ring->init = init_ring_common;
Line 2109... Line 2454...
2109
 
2454
 
2110
	return intel_init_ring_buffer(dev, ring);
2455
	return intel_init_ring_buffer(dev, ring);
Line 2111... Line 2456...
2111
}
2456
}
2112
 
2457
 
2113
int intel_init_vebox_ring_buffer(struct drm_device *dev)
2458
int intel_init_vebox_ring_buffer(struct drm_device *dev)
2114
{
2459
{
Line 2115... Line 2460...
2115
	drm_i915_private_t *dev_priv = dev->dev_private;
2460
	struct drm_i915_private *dev_priv = dev->dev_private;
2116
	struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
2461
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];
Line 2117... Line 2462...
2117
 
2462
 
Line 2129... Line 2474...
2129
		ring->irq_enable_mask =
2474
		ring->irq_enable_mask =
2130
			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2475
			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
2131
		ring->irq_get = gen8_ring_get_irq;
2476
		ring->irq_get = gen8_ring_get_irq;
2132
		ring->irq_put = gen8_ring_put_irq;
2477
		ring->irq_put = gen8_ring_put_irq;
2133
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2478
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-
 
2479
		if (i915_semaphore_is_enabled(dev)) {
-
 
2480
			ring->semaphore.sync_to = gen8_ring_sync;
-
 
2481
			ring->semaphore.signal = gen8_xcs_signal;
-
 
2482
			GEN8_RING_SEMAPHORE_INIT;
-
 
2483
		}
2134
	} else {
2484
	} else {
2135
	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2485
	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2136
	ring->irq_get = hsw_vebox_get_irq;
2486
	ring->irq_get = hsw_vebox_get_irq;
2137
	ring->irq_put = hsw_vebox_put_irq;
2487
	ring->irq_put = hsw_vebox_put_irq;
2138
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2488
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-
 
2489
		if (i915_semaphore_is_enabled(dev)) {
-
 
2490
	ring->semaphore.sync_to = gen6_ring_sync;
-
 
2491
	ring->semaphore.signal = gen6_signal;
-
 
2492
	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
-
 
2493
	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
-
 
2494
	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
-
 
2495
	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-
 
2496
	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-
 
2497
	ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
-
 
2498
	ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
-
 
2499
	ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
-
 
2500
	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-
 
2501
	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-
 
2502
		}
2139
	}
2503
	}
2140
	ring->sync_to = gen6_ring_sync;
-
 
2141
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
-
 
2142
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
-
 
2143
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
-
 
2144
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-
 
2145
	ring->signal_mbox[RCS] = GEN6_RVESYNC;
-
 
2146
	ring->signal_mbox[VCS] = GEN6_VVESYNC;
-
 
2147
	ring->signal_mbox[BCS] = GEN6_BVESYNC;
-
 
2148
	ring->signal_mbox[VECS] = GEN6_NOSYNC;
-
 
2149
	ring->init = init_ring_common;
2504
	ring->init = init_ring_common;
Line 2150... Line 2505...
2150
 
2505
 
2151
	return intel_init_ring_buffer(dev, ring);
2506
	return intel_init_ring_buffer(dev, ring);
Line 2152... Line 2507...
2152
}
2507
}
2153
 
2508
 
2154
int
2509
int
2155
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
2510
intel_ring_flush_all_caches(struct intel_engine_cs *ring)
Line 2156... Line 2511...
2156
{
2511
{
2157
	int ret;
2512
	int ret;
Line 2168... Line 2523...
2168
	ring->gpu_caches_dirty = false;
2523
	ring->gpu_caches_dirty = false;
2169
	return 0;
2524
	return 0;
2170
}
2525
}
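A sketch of where the two cache helpers above sit in a submission, assuming the usual execbuffer/add-request flow of this era; exec_start, exec_len and flags are placeholder names, not code from this file:

        intel_ring_invalidate_all_caches(ring); /* before the batch runs */
        ring->dispatch_execbuffer(ring, exec_start, exec_len, flags);
        ring->gpu_caches_dirty = true;           /* the batch may have written via the GPU */
        intel_ring_flush_all_caches(ring);       /* before the request's seqno is emitted */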
Line 2171... Line 2526...
2171
 
2526
 
2172
int
2527
int
2173
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
2528
intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
2174
{
2529
{
2175
	uint32_t flush_domains;
2530
	uint32_t flush_domains;
Line 2176... Line 2531...
2176
	int ret;
2531
	int ret;
Line 2186... Line 2541...
2186
	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
2541
	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
Line 2187... Line 2542...
2187
 
2542
 
2188
	ring->gpu_caches_dirty = false;
2543
	ring->gpu_caches_dirty = false;
2189
	return 0;
2544
	return 0;
-
 
2545
}
-
 
2546
 
-
 
2547
void
-
 
2548
intel_stop_ring_buffer(struct intel_engine_cs *ring)
-
 
2549
{
-
 
2550
	int ret;
-
 
2551
 
-
 
2552
	if (!intel_ring_initialized(ring))
-
 
2553
		return;
-
 
2554
 
-
 
2555
	ret = intel_ring_idle(ring);
-
 
2556
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
-
 
2557
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
-
 
2558
			  ring->name, ret);
-
 
2559
 
-
 
2560
	stop_ring(ring);