Subversion Repositories Kolibri OS


Diff of the i915 ring buffer code (intel_ringbuffer.c): Rev 4539 → Rev 4560
Line 39... Line 39...
 	if (space < 0)
 		space += ring->size;
 	return space;
 }
+
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	ring->tail &= ring->size - 1;
+	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+		return;
+	ring->write_tail(ring, ring->tail);
+}
 
 static int
 gen2_render_ring_flush(struct intel_ring_buffer *ring,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
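The unchanged context at the top of this hunk shows the usual circular-buffer free-space calculation: head minus tail, wrapped back into range when the difference goes negative. Below is a minimal standalone sketch of that arithmetic; the struct, the reserve of 8 bytes and the head/tail expression are illustrative assumptions, not the driver's exact definitions.

/* Illustrative sketch only: names, the 8-byte reserve and the head/tail
 * expression are assumptions, not the driver's definitions. */
#include <stdio.h>

struct demo_ring {
	int head;	/* next offset the GPU will read  */
	int tail;	/* next offset the CPU will write */
	int size;	/* total ring size in bytes       */
};

static int demo_ring_space(const struct demo_ring *ring)
{
	int space = ring->head - (ring->tail + 8);	/* 8 = assumed reserve */

	if (space < 0)
		space += ring->size;
	return space;
}

int main(void)
{
	struct demo_ring ring = { .head = 64, .tail = 4032, .size = 4096 };

	printf("free bytes: %d\n", demo_ring_space(&ring));	/* prints 120 */
	return 0;
}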
Line 273... Line 283...
 	int ret;
 
 	if (!ring->fbc_dirty)
 		return 0;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(ring, 6);
 	if (ret)
 		return ret;
-	intel_ring_emit(ring, MI_NOOP);
 	/* WaFbcNukeOn3DBlt:ivb/hsw */
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, MSG_FBC_REND_STATE);
 	intel_ring_emit(ring, value);
+	intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
+	intel_ring_emit(ring, MSG_FBC_REND_STATE);
+	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
 	intel_ring_advance(ring);
 
Line 342... Line 354...
 	intel_ring_emit(ring, flags);
 	intel_ring_emit(ring, scratch_addr);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
-	if (flush_domains)
+	if (!invalidate_domains && flush_domains)
 		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
 
 	return 0;
 }
 
+static int
+gen8_render_ring_flush(struct intel_ring_buffer *ring,
+		       u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	u32 scratch_addr = ring->scratch.gtt_offset + 128;
+	int ret;
+
+	flags |= PIPE_CONTROL_CS_STALL;
+
+	if (flush_domains) {
+		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+	}
+	if (invalidate_domains) {
+		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+		flags |= PIPE_CONTROL_QW_WRITE;
+		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+	}
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	return 0;
+
+}
+
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
Line 383... Line 436...
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = ring->obj;
 	int ret = 0;
 	u32 head;
 
-	if (HAS_FORCE_WAKE(dev))
-		gen6_gt_force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	if (I915_NEED_GFX_HWS(dev))
 		intel_ring_setup_status_page(ring);
Line 453... Line 505...
 		ring->last_retired_head = -1;
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
 out:
-	if (HAS_FORCE_WAKE(dev))
-		gen6_gt_force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 
Line 553... Line 604...
 	}
 
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (HAS_L3_GPU_CACHE(dev))
-		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	if (HAS_L3_DPF(dev))
+		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 
Line 587... Line 638...
  * even though the actual update only requires 3 dwords.
  */
 #define MBOX_UPDATE_DWORDS 4
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 	intel_ring_emit(ring, mmio_offset);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_NOOP);
 }
 
 /**
Line 606... Line 657...
 gen6_add_request(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *useless;
-	int i, ret;
+	int i, ret, num_dwords = 4;
 
-	ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
-				      MBOX_UPDATE_DWORDS) +
-				      4);
+	if (i915_semaphore_is_enabled(dev))
+		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
+#undef MBOX_UPDATE_DWORDS
+
+	ret = intel_ring_begin(ring, num_dwords);
 	if (ret)
 		return ret;
-#undef MBOX_UPDATE_DWORDS
 
+	if (i915_semaphore_is_enabled(dev)) {
 	for_each_ring(useless, dev_priv, i) {
 		u32 mbox_reg = ring->signal_mbox[i];
 		if (mbox_reg != GEN6_NOSYNC)
 			update_mboxes(ring, mbox_reg);
 	}
+	}
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
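A note on the space accounting in the rewritten gen6_add_request above: the request itself always needs four dwords, and each other ring's semaphore mailbox adds MBOX_UPDATE_DWORDS more, but only when semaphores are enabled. The sketch below just reproduces that arithmetic; NUM_RINGS is a placeholder for I915_NUM_RINGS and is an assumption of this example.

/* Sketch of the dword budget in the rewritten gen6_add_request. */
#include <stdbool.h>
#include <stdio.h>

#define MBOX_UPDATE_DWORDS	4	/* per-mailbox cost, as in the hunk above */
#define NUM_RINGS		4	/* placeholder for I915_NUM_RINGS */

static int add_request_dwords(bool semaphores_enabled)
{
	/* MI_STORE_DWORD_INDEX, hws index, seqno, MI_USER_INTERRUPT */
	int num_dwords = 4;

	if (semaphores_enabled)
		num_dwords += (NUM_RINGS - 1) * MBOX_UPDATE_DWORDS;

	return num_dwords;
}

int main(void)
{
	printf("without semaphores: %d dwords\n", add_request_dwords(false));
	printf("with semaphores:    %d dwords\n", add_request_dwords(true));
	return 0;
}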
Line 717... Line 771...
 
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128; /* write to separate cachelines */
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
Line 736... Line 790...
 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, ring->outstanding_lazy_request);
+	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
 	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	__intel_ring_advance(ring);
 
 	return 0;
 }
Line 910... Line 964...
 			break;
 		}
 	} else if (IS_GEN6(ring->dev)) {
 		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
 	} else {
+		/* XXX: gen8 returns to sanity */
 		mmio = RING_HWS_PGA(ring->mmio_base);
 	}
 
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
957
	if (ret)
1012
	if (ret)
958
		return ret;
1013
		return ret;
Line 959... Line 1014...
959
 
1014
 
960
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1015
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
961
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1016
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
962
	intel_ring_emit(ring, ring->outstanding_lazy_request);
1017
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
963
	intel_ring_emit(ring, MI_USER_INTERRUPT);
1018
	intel_ring_emit(ring, MI_USER_INTERRUPT);
Line 964... Line 1019...
964
	intel_ring_advance(ring);
1019
	__intel_ring_advance(ring);
965
 
1020
 
Line 966... Line 1021...
966
	return 0;
1021
	return 0;
Line 974... Line 1029...
 	unsigned long flags;
 
 	if (!dev->irq_enabled)
 	       return false;
 
-	/* It looks like we need to prevent the gt from suspending while waiting
-	 * for an notifiy irq, otherwise irqs seem to get lost on at least the
-	 * blt/bsd rings on ivb. */
-		gen6_gt_force_wake_get(dev_priv);
-
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~(ring->irq_enable_mask |
-					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+					 GT_PARITY_ERROR(dev)));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
Line 1003... Line 1053...
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring,
-				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		if (HAS_L3_DPF(dev) && ring->id == RCS)
+			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 		else
 			I915_WRITE_IMR(ring, ~0);
 		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-		gen6_gt_force_wake_put(dev_priv);
 }
 
 static bool
Line 1053... Line 1100...
 		snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
+
+static bool
+gen8_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~(ring->irq_enable_mask |
+					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+		} else {
+			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+	return true;
+}
+
+static void
+gen8_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
+		if (HAS_L3_DPF(dev) && ring->id == RCS) {
+			I915_WRITE_IMR(ring,
+				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+		} else {
+			I915_WRITE_IMR(ring, ~0);
+		}
+		POSTING_READ(RING_IMR(ring->mmio_base));
+	}
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+}
 
 static int
 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			 u32 offset, u32 length,
 			 unsigned flags)
			 u32 offset, u32 length,
1153
			 u32 offset, u32 length,
1061
			 unsigned flags)
1154
			 unsigned flags)
Line 1311... Line 1404...
1311
		return;
1404
		return;
Line 1312... Line 1405...
1312
 
1405
 
1313
	/* Disable the ring buffer. The ring must be idle at this point */
1406
	/* Disable the ring buffer. The ring must be idle at this point */
1314
	dev_priv = ring->dev->dev_private;
1407
	dev_priv = ring->dev->dev_private;
1315
	ret = intel_ring_idle(ring);
1408
	ret = intel_ring_idle(ring);
1316
	if (ret)
1409
	if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
1317
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1410
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
Line 1318... Line 1411...
1318
			  ring->name, ret);
1411
			  ring->name, ret);
Line 1319... Line 1412...
1319
 
1412
 
Line 1320... Line 1413...
1320
	I915_WRITE_CTL(ring, 0);
1413
	I915_WRITE_CTL(ring, 0);
1321
 
1414
 
1322
	iounmap(ring->virtual_start);
1415
	iounmap(ring->virtual_start);
-
 
1416
 
-
 
1417
    i915_gem_object_unpin(ring->obj);
Line 1323... Line 1418...
1323
 
1418
	drm_gem_object_unreference(&ring->obj->base);
1324
    i915_gem_object_unpin(ring->obj);
1419
	ring->obj = NULL;
Line 1325... Line 1420...
1325
	drm_gem_object_unreference(&ring->obj->base);
1420
	ring->preallocated_lazy_request = NULL;
Line 1408... Line 1503...
1408
 
1503
 
1409
	ret = intel_ring_wait_request(ring, n);
1504
	ret = intel_ring_wait_request(ring, n);
1410
	if (ret != -ENOSPC)
1505
	if (ret != -ENOSPC)
Line -... Line 1506...
-
 
1506
		return ret;
-
 
1507
 
-
 
1508
	/* force the tail write in case we have been skipping them */
1411
		return ret;
1509
	__intel_ring_advance(ring);
1412
 
1510
 
1413
	trace_i915_ring_wait_begin(ring);
1511
	trace_i915_ring_wait_begin(ring);
1414
	/* With GEM the hangcheck timer should kick us out of the loop,
1512
	/* With GEM the hangcheck timer should kick us out of the loop,
1415
	 * leaving it early runs the risk of corrupting GEM state (due
1513
	 * leaving it early runs the risk of corrupting GEM state (due
Line 1463... Line 1561...
 {
 	u32 seqno;
 	int ret;
 
 	/* We need to add any requests required to flush the objects and ring */
-	if (ring->outstanding_lazy_request) {
+	if (ring->outstanding_lazy_seqno) {
 		ret = i915_add_request(ring, NULL);
 		if (ret)
 			return ret;
Line 1483... Line 1581...
 }
 
 static int
 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 {
-	if (ring->outstanding_lazy_request)
+	if (ring->outstanding_lazy_seqno)
 		return 0;
 
-	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+	if (ring->preallocated_lazy_request == NULL) {
+		struct drm_i915_gem_request *request;
+
+		request = kmalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ring->preallocated_lazy_request = request;
+	}
+
+	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
-static int __intel_ring_begin(struct intel_ring_buffer *ring,
+static int __intel_ring_prepare(struct intel_ring_buffer *ring,
 			      int bytes)
			      int bytes)
Line 1506... Line 1614...
1506
		ret = ring_wait_for_space(ring, bytes);
1614
		ret = ring_wait_for_space(ring, bytes);
1507
		if (unlikely(ret))
1615
		if (unlikely(ret))
1508
			return ret;
1616
			return ret;
1509
	}
1617
	}
Line 1510... Line -...
1510
 
-
 
1511
	ring->space -= bytes;
1618
 
1512
	return 0;
1619
	return 0;
Line 1513... Line 1620...
1513
}
1620
}
1514
 
1621
 
Line 1521... Line 1628...
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
 	if (ret)
 		return ret;
 
+	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
+	if (ret)
+		return ret;
+
 	/* Preallocate the olr before touching the ring */
 	ret = intel_ring_alloc_seqno(ring);
 	if (ret)
 		return ret;
 
-	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
+	ring->space -= num_dwords * sizeof(uint32_t);
+	return 0;
 }
 
1534
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1646
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
Line 1535... Line 1647...
1535
{
1647
{
1536
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1648
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1537
 
1649
 
1538
	BUG_ON(ring->outstanding_lazy_request);
1650
	BUG_ON(ring->outstanding_lazy_seqno);
Line 1546... Line 1658...
 
 	ring->set_seqno(ring, seqno);
 	ring->hangcheck.seqno = seqno;
 }
 
-
-void intel_ring_advance(struct intel_ring_buffer *ring)
-{
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-	ring->tail &= ring->size - 1;
-	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
-		return;
-	ring->write_tail(ring, ring->tail);
-}
-
 
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 				     u32 value)
Line 1601... Line 1702...
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
 	cmd = MI_FLUSH_DW;
+	if (INTEL_INFO(ring->dev)->gen >= 8)
+		cmd += 1;
 	/*
 	 * Bspec vol 1c.5 - video engine command streamer:
 	 * "If ENABLED, all TLBs will be invalidated once the flush
 	 * operation is complete. This bit is only valid when the
Line 1612... Line 1715...
 	if (invalidate & I915_GEM_GPU_DOMAINS)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
 			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else  {
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
+	}
 	intel_ring_advance(ring);
 	return 0;
 }
 
+static int
+gen8_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len,
+			      unsigned flags)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
+		!(flags & I915_DISPATCH_SECURE);
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	/* FIXME(BDW): Address space and security selectors. */
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
+	intel_ring_emit(ring, offset);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
 static int
1622
 
1754
 
1623
static int
1755
static int
Line 1674... Line 1806...
1674
	ret = intel_ring_begin(ring, 4);
1806
	ret = intel_ring_begin(ring, 4);
1675
	if (ret)
1807
	if (ret)
1676
		return ret;
1808
		return ret;
Line 1677... Line 1809...
1677
 
1809
 
-
 
1810
	cmd = MI_FLUSH_DW;
-
 
1811
	if (INTEL_INFO(ring->dev)->gen >= 8)
1678
	cmd = MI_FLUSH_DW;
1812
		cmd += 1;
1679
	/*
1813
	/*
1680
	 * Bspec vol 1c.3 - blitter engine command streamer:
1814
	 * Bspec vol 1c.3 - blitter engine command streamer:
1681
	 * "If ENABLED, all TLBs will be invalidated once the flush
1815
	 * "If ENABLED, all TLBs will be invalidated once the flush
1682
	 * operation is complete. This bit is only valid when the
1816
	 * operation is complete. This bit is only valid when the
Line 1685... Line 1819...
 	if (invalidate & I915_GEM_DOMAIN_RENDER)
 		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
 			MI_FLUSH_DW_OP_STOREDW;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	if (INTEL_INFO(ring->dev)->gen >= 8) {
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
+	} else  {
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
+	}
 	intel_ring_advance(ring);
 
-	if (IS_GEN7(dev) && flush)
+	if (IS_GEN7(dev) && !invalidate && flush)
 		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
 
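In the two MI_FLUSH_DW hunks above, the gen8 `cmd += 1` presumably grows the command's embedded length field by one dword because the post-sync address becomes 64-bit; the number of dwords actually emitted stays at four, with the upper-address dword taking the slot the MI_NOOP used to fill (MI_NOOP encodes as zero). The sketch below only illustrates that packing; FLUSH_CMD and SCRATCH_ADDR are dummy values, not the real MI_FLUSH_DW encoding or the driver's scratch address.

/* Dword-packing sketch for the pre-gen8 and gen8 flush layouts above. */
#include <stdio.h>

#define FLUSH_CMD	0x26000002u	/* dummy opcode with a low length field */
#define SCRATCH_ADDR	0x00001000u	/* dummy address */

static int build_flush(unsigned int *out, int gen8)
{
	int n = 0;
	unsigned int cmd = FLUSH_CMD;

	if (gen8)
		cmd += 1;		/* count one more dword in the length field */

	out[n++] = cmd;
	out[n++] = SCRATCH_ADDR;
	if (gen8) {
		out[n++] = 0;		/* upper 32 bits of the address */
		out[n++] = 0;		/* post-sync value */
	} else {
		out[n++] = 0;
		out[n++] = 0;		/* MI_NOOP filler (encodes as zero) */
	}
	return n;
}

int main(void)
{
	unsigned int buf[4];
	int i, n = build_flush(buf, 1);

	for (i = 0; i < n; i++)
		printf("dword %d: 0x%08x\n", i, buf[i]);
	return 0;
}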
1709
	if (INTEL_INFO(dev)->gen >= 6) {
1848
	if (INTEL_INFO(dev)->gen >= 6) {
1710
       ring->add_request = gen6_add_request;
1849
       ring->add_request = gen6_add_request;
1711
		ring->flush = gen7_render_ring_flush;
1850
		ring->flush = gen7_render_ring_flush;
1712
		if (INTEL_INFO(dev)->gen == 6)
1851
		if (INTEL_INFO(dev)->gen == 6)
1713
		ring->flush = gen6_render_ring_flush;
1852
		ring->flush = gen6_render_ring_flush;
-
 
1853
		if (INTEL_INFO(dev)->gen >= 8) {
-
 
1854
			ring->flush = gen8_render_ring_flush;
-
 
1855
			ring->irq_get = gen8_ring_get_irq;
-
 
1856
			ring->irq_put = gen8_ring_put_irq;
-
 
1857
		} else {
1714
		ring->irq_get = gen6_ring_get_irq;
1858
		ring->irq_get = gen6_ring_get_irq;
1715
		ring->irq_put = gen6_ring_put_irq;
1859
		ring->irq_put = gen6_ring_put_irq;
-
 
1860
		}
1716
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1861
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1717
		ring->get_seqno = gen6_ring_get_seqno;
1862
		ring->get_seqno = gen6_ring_get_seqno;
1718
		ring->set_seqno = ring_set_seqno;
1863
		ring->set_seqno = ring_set_seqno;
1719
		ring->sync_to = gen6_ring_sync;
1864
		ring->sync_to = gen6_ring_sync;
1720
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
1865
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
Line 1752... Line 1897...
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
 	if (IS_HASWELL(dev))
 		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (IS_GEN8(dev))
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 6)
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
 		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
 	else if (IS_I830(dev) || IS_845G(dev))
1867
 
2014
 
1868
	ring->name = "bsd ring";
2015
	ring->name = "bsd ring";
Line 1869... Line 2016...
1869
	ring->id = VCS;
2016
	ring->id = VCS;
1870
 
2017
 
1871
	ring->write_tail = ring_write_tail;
2018
	ring->write_tail = ring_write_tail;
1872
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
2019
	if (INTEL_INFO(dev)->gen >= 6) {
1873
		ring->mmio_base = GEN6_BSD_RING_BASE;
2020
		ring->mmio_base = GEN6_BSD_RING_BASE;
1874
		/* gen6 bsd needs a special wa for tail updates */
2021
		/* gen6 bsd needs a special wa for tail updates */
1875
		if (IS_GEN6(dev))
2022
		if (IS_GEN6(dev))
1876
			ring->write_tail = gen6_bsd_ring_write_tail;
2023
			ring->write_tail = gen6_bsd_ring_write_tail;
1877
		ring->flush = gen6_bsd_ring_flush;
2024
		ring->flush = gen6_bsd_ring_flush;
1878
		ring->add_request = gen6_add_request;
2025
		ring->add_request = gen6_add_request;
-
 
2026
		ring->get_seqno = gen6_ring_get_seqno;
-
 
2027
		ring->set_seqno = ring_set_seqno;
-
 
2028
		if (INTEL_INFO(dev)->gen >= 8) {
-
 
2029
			ring->irq_enable_mask =
-
 
2030
				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-
 
2031
			ring->irq_get = gen8_ring_get_irq;
-
 
2032
			ring->irq_put = gen8_ring_put_irq;
-
 
2033
			ring->dispatch_execbuffer =
1879
		ring->get_seqno = gen6_ring_get_seqno;
2034
				gen8_ring_dispatch_execbuffer;
1880
		ring->set_seqno = ring_set_seqno;
2035
		} else {
1881
		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2036
		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-
 
2037
		ring->irq_get = gen6_ring_get_irq;
1882
		ring->irq_get = gen6_ring_get_irq;
2038
		ring->irq_put = gen6_ring_put_irq;
-
 
2039
			ring->dispatch_execbuffer =
1883
		ring->irq_put = gen6_ring_put_irq;
2040
				gen6_ring_dispatch_execbuffer;
1884
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2041
		}
1885
		ring->sync_to = gen6_ring_sync;
2042
		ring->sync_to = gen6_ring_sync;
1886
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
2043
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
1887
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
2044
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
Line 1925... Line 2082...
1925
	ring->write_tail = ring_write_tail;
2082
	ring->write_tail = ring_write_tail;
1926
	ring->flush = gen6_ring_flush;
2083
	ring->flush = gen6_ring_flush;
1927
	ring->add_request = gen6_add_request;
2084
	ring->add_request = gen6_add_request;
1928
	ring->get_seqno = gen6_ring_get_seqno;
2085
	ring->get_seqno = gen6_ring_get_seqno;
1929
	ring->set_seqno = ring_set_seqno;
2086
	ring->set_seqno = ring_set_seqno;
-
 
2087
	if (INTEL_INFO(dev)->gen >= 8) {
-
 
2088
		ring->irq_enable_mask =
-
 
2089
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-
 
2090
		ring->irq_get = gen8_ring_get_irq;
-
 
2091
		ring->irq_put = gen8_ring_put_irq;
-
 
2092
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-
 
2093
	} else {
1930
	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2094
	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
1931
	ring->irq_get = gen6_ring_get_irq;
2095
	ring->irq_get = gen6_ring_get_irq;
1932
	ring->irq_put = gen6_ring_put_irq;
2096
	ring->irq_put = gen6_ring_put_irq;
1933
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2097
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-
 
2098
	}
1934
	ring->sync_to = gen6_ring_sync;
2099
	ring->sync_to = gen6_ring_sync;
1935
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
2100
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
1936
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
2101
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
1937
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
2102
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
1938
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
2103
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
Line 1957... Line 2122...
1957
	ring->write_tail = ring_write_tail;
2122
	ring->write_tail = ring_write_tail;
1958
	ring->flush = gen6_ring_flush;
2123
	ring->flush = gen6_ring_flush;
1959
	ring->add_request = gen6_add_request;
2124
	ring->add_request = gen6_add_request;
1960
	ring->get_seqno = gen6_ring_get_seqno;
2125
	ring->get_seqno = gen6_ring_get_seqno;
1961
	ring->set_seqno = ring_set_seqno;
2126
	ring->set_seqno = ring_set_seqno;
-
 
2127
 
-
 
2128
	if (INTEL_INFO(dev)->gen >= 8) {
-
 
2129
		ring->irq_enable_mask =
-
 
2130
			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-
 
2131
		ring->irq_get = gen8_ring_get_irq;
-
 
2132
		ring->irq_put = gen8_ring_put_irq;
-
 
2133
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-
 
2134
	} else {
1962
	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2135
	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
1963
	ring->irq_get = hsw_vebox_get_irq;
2136
	ring->irq_get = hsw_vebox_get_irq;
1964
	ring->irq_put = hsw_vebox_put_irq;
2137
	ring->irq_put = hsw_vebox_put_irq;
1965
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2138
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-
 
2139
	}
1966
	ring->sync_to = gen6_ring_sync;
2140
	ring->sync_to = gen6_ring_sync;
1967
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
2141
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
1968
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
2142
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
1969
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
2143
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
1970
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
2144
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
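The ring-init hunks above all make the same move: when gen >= 8, install the gen8 interrupt and dispatch callbacks, otherwise keep the existing gen6/7 ones. Below is a compact sketch of that per-generation callback selection; every name in it is invented for the example.

/* Per-generation callback selection sketch; names are illustrative only. */
#include <stdio.h>

struct demo_ring;

typedef int (*demo_dispatch_fn)(struct demo_ring *ring);

struct demo_ring {
	unsigned int irq_enable_mask;
	demo_dispatch_fn dispatch_execbuffer;
};

static int demo_gen6_dispatch(struct demo_ring *ring) { (void)ring; return 6; }
static int demo_gen8_dispatch(struct demo_ring *ring) { (void)ring; return 8; }

static void demo_init_ring(struct demo_ring *ring, int gen,
			   unsigned int gen6_mask, unsigned int gen8_mask)
{
	if (gen >= 8) {
		ring->irq_enable_mask = gen8_mask;
		ring->dispatch_execbuffer = demo_gen8_dispatch;
	} else {
		ring->irq_enable_mask = gen6_mask;
		ring->dispatch_execbuffer = demo_gen6_dispatch;
	}
}

int main(void)
{
	struct demo_ring ring;

	demo_init_ring(&ring, 8, 0x01, 0x100);
	printf("dispatch routed to gen%d path\n", ring.dispatch_execbuffer(&ring));
	return 0;
}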