Subversion Repositories: KolibriOS


Diff of intel_ringbuffer.c (i915 driver): Rev 6660 → Rev 6937
Line 25 (Rev 6660) ... Line 25 (Rev 6937):

  *    Zou Nan hai 
  *    Xiang Hai hao
  *
  */
 
+#include <linux/log2.h>
 #include <drm/drmP.h>
 #include "i915_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-bool
-intel_ring_initialized(struct intel_engine_cs *ring)
-{
-	struct drm_device *dev = ring->dev;
-
-	if (!dev)
-		return false;
-
-	if (i915.enable_execlists) {
-		struct intel_context *dctx = ring->default_context;
-		struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
-
-		return ringbuf->obj;
-	} else
-		return ring->buffer && ring->buffer->obj;
-}
-
 int __intel_ring_space(int head, int tail, int size)
 {
 	int space = head - tail;
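Rev 6937 drops the local definition of intel_ring_initialized() here (later hunks still call it, so it now lives elsewhere) and gains #include <linux/log2.h>, which the is_power_of_2() change further down needs. The context above cuts off inside __intel_ring_space(); for reference, a minimal sketch of the rest of the helper as it conventionally reads in kernels of this vintage (everything past the visible first statement is an assumption):

    int __intel_ring_space(int head, int tail, int size)
    {
        int space = head - tail;    /* positive when head is ahead of tail */
        if (space <= 0)
            space += size;          /* head wrapped past the end of the buffer */
        return space - I915_RING_FREE_SPACE;    /* keep a reserved gap free */
    }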
Line 481 (Rev 6660) ... Line 465 (Rev 6937):

 
 static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	u32 mmio = 0;
+	i915_reg_t mmio;
 
 	/* The ring status page addresses are no longer next to the rest of
 	 * the ring registers as of gen7.
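This hunk, and several below, replace bare u32 register offsets with i915_reg_t. Upstream Linux 4.4 introduced this wrapper type so a register offset can no longer be silently confused with an ordinary integer. A minimal sketch of the type and its accessor, reproduced from memory of the upstream headers (treat the exact spelling as an assumption):

    /* Wrapping the offset in a struct turns "passed a plain u32 where a
     * register was expected" into a compile error. */
    typedef struct {
        u32 reg;
    } i915_reg_t;

    #define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

    static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
    {
        return reg.reg;
    }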
Line 524 (Rev 6660) ... Line 508 (Rev 6937):

 	 * FIXME: These two bits have disappeared on gen8, so a question
 	 * arises: do we still need this and if so how should we go about
 	 * invalidating the TLB?
 	 */
 	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
-		u32 reg = RING_INSTPM(ring->mmio_base);
+		i915_reg_t reg = RING_INSTPM(ring->mmio_base);
 
 		/* ring should be idle before issuing a sync flush*/
 		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
Line 733 (Rev 6660) ... Line 717 (Rev 6937):

 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit(ring, w->reg[i].addr);
+		intel_ring_emit_reg(ring, w->reg[i].addr);
 		intel_ring_emit(ring, w->reg[i].value);
 	}
 	intel_ring_emit(ring, MI_NOOP);
766
 
750
 
767
	return ret;
751
	return ret;
Line 768... Line 752...
768
}
752
}
-
 
753
 
769
 
754
static int wa_add(struct drm_i915_private *dev_priv,
770
static int wa_add(struct drm_i915_private *dev_priv,
755
		  i915_reg_t addr,
771
		  const u32 addr, const u32 mask, const u32 val)
756
		  const u32 mask, const u32 val)
Line 772... Line 757...
772
{
757
{
773
	const u32 idx = dev_priv->workarounds.count;
758
	const u32 idx = dev_priv->workarounds.count;
Line 924 (Rev 6660) ... Line 909 (Rev 6937):

 
 	/* Syncing dependencies between camera and graphics:skl,bxt */
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
-	if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
-	    INTEL_REVID(dev) == SKL_REVID_B0)) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
-		/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
+	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 				  GEN9_DG_MIRROR_FIX_ENABLE);
-	}
 
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
-	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
-		/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
 				  GEN9_RHWO_OPTIMIZATION_DISABLE);
 		/*
 		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
 		 * but we do that in per ctx batchbuffer as there is an issue
 		 * with this register not getting restored on ctx restore
 		 */
 	}
 
-	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
-	    IS_BROXTON(dev)) {
-		/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
+	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
 		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
 				  GEN9_ENABLE_YV12_BUGFIX);
-	}
961
	/* WaCcsTlbPrefetchDisable:skl,bxt */
942
	/* WaCcsTlbPrefetchDisable:skl,bxt */
962
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
943
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
963
			  GEN9_CCS_TLB_PREFETCH_ENABLE);
944
			  GEN9_CCS_TLB_PREFETCH_ENABLE);
Line 964... Line 945...
964
 
945
 
965
	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
946
	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
966
	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
947
	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
967
	    (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0))
948
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
968
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
949
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
Line 969... Line 950...
969
				  PIXEL_MASK_CAMMING_DISABLE);
950
				  PIXEL_MASK_CAMMING_DISABLE);
970
 
951
 
971
	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
952
	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
972
	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
953
	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
973
	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
954
	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
974
	    (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
955
	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
Line 975... Line 956...
975
		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
956
		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
976
	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
-
 
977
 
957
	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
978
	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
958
 
979
	if (IS_SKYLAKE(dev) ||
959
	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
980
	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
-
 
Line 981... Line 960...
981
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
960
	if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
982
				  GEN8_SAMPLER_POWER_BYPASS_DIS);
961
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
Line 983... Line 962...
983
	}
962
				  GEN8_SAMPLER_POWER_BYPASS_DIS);
Line 1000 (Rev 6660) ... Line 979 (Rev 6937):

 
 		/*
 		 * Only consider slices where one, and only one, subslice has 7
 		 * EUs
 		 */
-		if (hweight8(dev_priv->info.subslice_7eu[i]) != 1)
+		if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
 			continue;
 
 		/*
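hweight8(x) != 1 (population count not exactly one) becomes !is_power_of_2(x): a value with exactly one bit set is by definition a nonzero power of two, and both forms reject zero. This is what the new #include <linux/log2.h> in the first hunk is for. The kernel's definition, for reference:

    /* From <linux/log2.h>: true iff n is nonzero with exactly one bit set. */
    static inline __attribute__((const))
    bool is_power_of_2(unsigned long n)
    {
        return (n != 0 && ((n & (n - 1)) == 0));
    }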
Line 1038 (Rev 6660) ... Line 1017 (Rev 6937):

 
 	ret = gen9_init_workarounds(ring);
 	if (ret)
 		return ret;
 
-	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
-		/* WaDisableHDCInvalidation:skl */
-		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-			   BDW_DISABLE_HDC_INVALIDATION);
-
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
 		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
 		I915_WRITE(FF_SLICE_CS_CHICKEN2,
 			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
 	}
 
 	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
 	 * involving this register should also be added to WA batch as required.
 	 */
-	if (INTEL_REVID(dev) <= SKL_REVID_E0)
+	if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
 		/* WaDisableLSQCROPERFforOCL:skl */
 		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
 			   GEN8_LQSC_RO_PERF_DIS);
 
 	/* WaEnableGapsTsvCreditFix:skl */
-	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
 		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
 					   GEN9_GAPS_TSV_CREDIT_DISABLE));
 	}
 
 	/* WaDisablePowerCompilerClockGating:skl */
-	if (INTEL_REVID(dev) == SKL_REVID_B0)
+	if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+	/* This is tied to WaForceContextSaveRestoreNonCoherent */
+	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
 		 * is a workaround for a possible hang in the unlikely event
 		 * a TLB invalidation occurs during a PSD flush.
 		 */
 		/* WaForceEnableNonCoherent:skl */
 		WA_SET_BIT_MASKED(HDC_CHICKEN0,
 				  HDC_FORCE_NON_COHERENT);
+
+		/* WaDisableHDCInvalidation:skl */
+		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+			   BDW_DISABLE_HDC_INVALIDATION);
 	}
 
-	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
-	    INTEL_REVID(dev) == SKL_REVID_D0)
-		/* WaBarrierPerformanceFixDisable:skl */
+	/* WaBarrierPerformanceFixDisable:skl */
+	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
 		WA_SET_BIT_MASKED(HDC_CHICKEN0,
 				  HDC_FENCE_DEST_SLM_DISABLE |
 				  HDC_BARRIER_PERFORMANCE_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:skl */
Line 1107 (Rev 6660) ... Line 1085 (Rev 6937):

 	if (ret)
 		return ret;
 
 	/* WaStoreMultiplePTEenable:bxt */
 	/* This is a requirement according to Hardware specification */
-	if (INTEL_REVID(dev) == BXT_REVID_A0)
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
 	/* WaSetClckGatingDisableMedia:bxt */
-	if (INTEL_REVID(dev) == BXT_REVID_A0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
 		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
 					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
 	}
 
 	/* WaDisableThreadStallDopClockGating:bxt */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
 			  STALL_DOP_GATING_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:bxt */
-	if (INTEL_REVID(dev) <= BXT_REVID_B0) {
+	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
Line 1319 (Rev 6660) ... Line 1297 (Rev 6937):

 	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
 	for_each_ring(useless, dev_priv, i) {
-		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
-		if (mbox_reg != GEN6_NOSYNC) {
+		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
+
+		if (i915_mmio_reg_valid(mbox_reg)) {
 			u32 seqno = i915_gem_request_get_seqno(signaller_req);
+
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
-			intel_ring_emit(signaller, mbox_reg);
+			intel_ring_emit_reg(signaller, mbox_reg);
 			intel_ring_emit(signaller, seqno);
 		}
 	}
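Unused slots in the mbox signal table were previously marked with the magic offset GEN6_NOSYNC; under the typed scheme they hold an invalid-register sentinel tested with i915_mmio_reg_valid(). A sketch of those helpers (assumed from the same upstream type-safety series):

    #define INVALID_MMIO_REG _MMIO(0)

    static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b)
    {
        return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b);
    }

    static inline bool i915_mmio_reg_valid(i915_reg_t reg)
    {
        return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
    }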
Line 2025 (Rev 6660) ... Line 2005 (Rev 6937):

 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 				     struct intel_ringbuffer *ringbuf)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = ringbuf->obj;
+	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+	unsigned flags = PIN_OFFSET_BIAS | 4096;
 	int ret;
 
 	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
 	if (ret)
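The new flags value documents a workaround: PIN_OFFSET_BIAS ORed with a byte offset asks the GGTT allocator to place the object at or above that offset, here 4096, so the ringbuffer never sits at offset 0, where wraparound has been seen to hang. The pin call that consumes flags presumably sits just past the visible context; a hedged sketch of the intended use:

    /* Assumption: below the excerpt, the biased flags are passed to the pin
     * so the ring stays out of the first 4 KiB of the GGTT. */
    ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);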
Line 2081 (Rev 6660) ... Line 2063 (Rev 6937):

 {
 	struct intel_ringbuffer *ring;
 	int ret;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-	if (ring == NULL)
+	if (ring == NULL) {
+		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
+				 engine->name);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	ring->ring = engine;
+	list_add(&ring->link, &engine->buffers);
 
 	ring->size = size;
Line 2100 (Rev 6660) ... Line 2086 (Rev 6937):

 	ring->last_retired_head = -1;
 	intel_ring_update_space(ring);
 
 	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
 	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
 			  engine->name, ret);
+		list_del(&ring->link);
 		kfree(ring);
 		return ERR_PTR(ret);
 	}
Line 2113 (Rev 6660) ... Line 2100 (Rev 6937):

 
 void
 intel_ringbuffer_free(struct intel_ringbuffer *ring)
 {
 	intel_destroy_ringbuffer_obj(ring);
+	list_del(&ring->link);
 	kfree(ring);
 }
 
Line 2128 (Rev 6660) ... Line 2116 (Rev 6937):

 
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->execlist_queue);
+	INIT_LIST_HEAD(&ring->buffers);
 	i915_gem_batch_pool_init(dev, &ring->batch_pool);
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
 
 	ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
-	if (IS_ERR(ringbuf))
-		return PTR_ERR(ringbuf);
+	if (IS_ERR(ringbuf)) {
+		ret = PTR_ERR(ringbuf);
+		goto error;
+	}
 	ring->buffer = ringbuf;
2164
		goto error;
2155
		goto error;
Line 2165... Line 2156...
2165
 
2156
 
Line 2166... Line 2157...
2166
	return 0;
2157
	return 0;
2167
 
2158
 
2168
error:
-
 
2169
	intel_ringbuffer_free(ringbuf);
2159
error:
2170
	ring->buffer = NULL;
2160
	intel_cleanup_ring_buffer(ring);
Line 2171... Line 2161...
2171
	return ret;
2161
	return ret;
2172
}
2162
}
Line 2178 (Rev 6660) ... Line 2168 (Rev 6937):

 	if (!intel_ring_initialized(ring))
 		return;
 
 	dev_priv = to_i915(ring->dev);
 
+	if (ring->buffer) {
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
 	intel_unpin_ringbuffer_obj(ring->buffer);
 	intel_ringbuffer_free(ring->buffer);
 	ring->buffer = NULL;
+	}
Line 2197 (Rev 6660) ... Line 2189 (Rev 6937):

 		cleanup_phys_status_page(ring);
 	}
 
 	i915_cmd_parser_fini_ring(ring);
 	i915_gem_batch_pool_fini(&ring->batch_pool);
+	ring->dev = NULL;
 }
 
 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)