Subversion Repositories: Kolibri OS

Diff between Rev 5060 and Rev 5354

--- Rev 5060
+++ Rev 5354
@@ -34 +34 @@
 
 #include 
 
 #define FORCEWAKE_ACK_TIMEOUT_MS 2
 
-#define assert_spin_locked(x)
-
 void getrawmonotonic(struct timespec *ts);
-
-static inline void outb(u8 v, u16 port)
-{
-    asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
-}
-static inline u8 inb(u16 port)
-{
-    u8 v;
-    asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
-    return v;
-}
 
Line 93... Line 80...
93
 *
80
 *
94
 * FBC-related functionality can be enabled by the means of the
81
 * FBC-related functionality can be enabled by the means of the
95
 * i915.i915_enable_fbc parameter
82
 * i915.i915_enable_fbc parameter
96
 */
83
 */
Line -... Line 84...
-
 
84
 
-
 
85
static void gen9_init_clock_gating(struct drm_device *dev)
-
 
86
{
-
 
87
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
88
 
-
 
89
	/*
-
 
90
	 * WaDisableSDEUnitClockGating:skl
-
 
91
	 * This seems to be a pre-production w/a.
-
 
92
	 */
-
 
93
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
-
 
94
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
 
95
 
-
 
96
	/*
-
 
97
	 * WaDisableDgMirrorFixInHalfSliceChicken5:skl
-
 
98
	 * This is a pre-production w/a.
-
 
99
	 */
-
 
100
	I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
-
 
101
		   I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
-
 
102
		   ~GEN9_DG_MIRROR_FIX_ENABLE);
-
 
103
 
-
 
104
	/* Wa4x4STCOptimizationDisable:skl */
-
 
105
	I915_WRITE(CACHE_MODE_1,
-
 
106
		   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
-
 
107
}
97
 
108
 
98
static void i8xx_disable_fbc(struct drm_device *dev)
109
static void i8xx_disable_fbc(struct drm_device *dev)
99
{
110
{
100
	struct drm_i915_private *dev_priv = dev->dev_private;
111
	struct drm_i915_private *dev_priv = dev->dev_private;
Line -... Line 112...
-
 
112
	u32 fbc_ctl;
-
 
113
 
101
	u32 fbc_ctl;
114
	dev_priv->fbc.enabled = false;
102
 
115
 
103
	/* Disable compression */
116
	/* Disable compression */
104
	fbc_ctl = I915_READ(FBC_CONTROL);
117
	fbc_ctl = I915_READ(FBC_CONTROL);
Line 126... Line 139...
126
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
139
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
127
	int cfb_pitch;
140
	int cfb_pitch;
128
	int i;
141
	int i;
129
	u32 fbc_ctl;
142
	u32 fbc_ctl;
Line -... Line 143...
-
 
143
 
-
 
144
	dev_priv->fbc.enabled = true;
130
 
145
 
131
	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
146
	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
132
	if (fb->pitches[0] < cfb_pitch)
147
	if (fb->pitches[0] < cfb_pitch)
Line 133... Line 148...
133
		cfb_pitch = fb->pitches[0];
148
		cfb_pitch = fb->pitches[0];
Line 180... Line 195...
180
	struct drm_framebuffer *fb = crtc->primary->fb;
195
	struct drm_framebuffer *fb = crtc->primary->fb;
181
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
196
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
182
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
197
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
183
	u32 dpfc_ctl;
198
	u32 dpfc_ctl;
Line -... Line 199...
-
 
199
 
-
 
200
	dev_priv->fbc.enabled = true;
184
 
201
 
185
	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
202
	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
186
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
203
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
187
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
204
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
188
	else
205
	else
Line 200... Line 217...
200
static void g4x_disable_fbc(struct drm_device *dev)
217
static void g4x_disable_fbc(struct drm_device *dev)
201
{
218
{
202
	struct drm_i915_private *dev_priv = dev->dev_private;
219
	struct drm_i915_private *dev_priv = dev->dev_private;
203
	u32 dpfc_ctl;
220
	u32 dpfc_ctl;
Line -... Line 221...
-
 
221
 
-
 
222
	dev_priv->fbc.enabled = false;
204
 
223
 
205
	/* Disable compression */
224
	/* Disable compression */
206
	dpfc_ctl = I915_READ(DPFC_CONTROL);
225
	dpfc_ctl = I915_READ(DPFC_CONTROL);
207
	if (dpfc_ctl & DPFC_CTL_EN) {
226
	if (dpfc_ctl & DPFC_CTL_EN) {
208
		dpfc_ctl &= ~DPFC_CTL_EN;
227
		dpfc_ctl &= ~DPFC_CTL_EN;
Line 251... Line 270...
251
	struct drm_framebuffer *fb = crtc->primary->fb;
270
	struct drm_framebuffer *fb = crtc->primary->fb;
252
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
271
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
253
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
272
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
254
	u32 dpfc_ctl;
273
	u32 dpfc_ctl;
Line -... Line 274...
-
 
274
 
-
 
275
	dev_priv->fbc.enabled = true;
255
 
276
 
256
	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
277
	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
257
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
278
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
Line 258... Line 279...
258
		dev_priv->fbc.threshold++;
279
		dev_priv->fbc.threshold++;
Line 291... Line 312...
291
static void ironlake_disable_fbc(struct drm_device *dev)
312
static void ironlake_disable_fbc(struct drm_device *dev)
292
{
313
{
293
	struct drm_i915_private *dev_priv = dev->dev_private;
314
	struct drm_i915_private *dev_priv = dev->dev_private;
294
	u32 dpfc_ctl;
315
	u32 dpfc_ctl;
Line -... Line 316...
-
 
316
 
-
 
317
	dev_priv->fbc.enabled = false;
295
 
318
 
296
	/* Disable compression */
319
	/* Disable compression */
297
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
320
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
298
	if (dpfc_ctl & DPFC_CTL_EN) {
321
	if (dpfc_ctl & DPFC_CTL_EN) {
299
		dpfc_ctl &= ~DPFC_CTL_EN;
322
		dpfc_ctl &= ~DPFC_CTL_EN;
Line 317... Line 340...
317
	struct drm_framebuffer *fb = crtc->primary->fb;
340
	struct drm_framebuffer *fb = crtc->primary->fb;
318
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
341
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
319
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
342
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
320
	u32 dpfc_ctl;
343
	u32 dpfc_ctl;
Line -... Line 344...
-
 
344
 
-
 
345
	dev_priv->fbc.enabled = true;
321
 
346
 
322
	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
347
	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
323
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
348
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
Line 324... Line 349...
324
		dev_priv->fbc.threshold++;
349
		dev_priv->fbc.threshold++;
Line 336... Line 361...
336
		break;
361
		break;
337
	}
362
	}
Line 338... Line 363...
338
 
363
 
Line -... Line 364...
-
 
364
	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
-
 
365
 
-
 
366
	if (dev_priv->fbc.false_color)
339
	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
367
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;
Line 340... Line 368...
340
 
368
 
341
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
369
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
342
 
370
 
Line 363... Line 391...
363
 
391
 
364
bool intel_fbc_enabled(struct drm_device *dev)
392
bool intel_fbc_enabled(struct drm_device *dev)
365
{
393
{
Line 366... Line 394...
366
	struct drm_i915_private *dev_priv = dev->dev_private;
394
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
395
 
-
 
396
	return dev_priv->fbc.enabled;
-
 
397
}
-
 
398
 
-
 
399
void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
-
 
400
{
-
 
401
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
402
 
-
 
403
	if (!IS_GEN8(dev))
-
 
404
		return;
367
 
405
 
Line 368... Line 406...
368
	if (!dev_priv->display.fbc_enabled)
406
	if (!intel_fbc_enabled(dev))
369
		return false;
407
		return;
Line 370... Line 408...
370
 
408
 
371
	return dev_priv->display.fbc_enabled(dev);
409
	I915_WRITE(MSG_FBC_REND_STATE, value);
372
}
410
}
Line 605... Line 643...
605
	    obj->fence_reg == I915_FENCE_REG_NONE) {
643
	    obj->fence_reg == I915_FENCE_REG_NONE) {
606
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
644
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
607
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
645
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
608
		goto out_disable;
646
		goto out_disable;
609
	}
647
	}
-
 
648
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
-
 
649
	    to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
-
 
650
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
-
 
651
			DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
-
 
652
		goto out_disable;
-
 
653
	}
Line 610... Line 654...
610
 
654
 
611
	/* If the kernel debugger is active, always disable compression */
655
	/* If the kernel debugger is active, always disable compression */
612
	if (in_dbg_master())
656
	if (in_dbg_master())
Line 880... Line 924...
880
 * FIFO underruns and display "flicker").
924
 * FIFO underruns and display "flicker").
881
 *
925
 *
882
 * A value of 5us seems to be a good balance; safe for very low end
926
 * A value of 5us seems to be a good balance; safe for very low end
883
 * platforms but not overly aggressive on lower latency configs.
927
 * platforms but not overly aggressive on lower latency configs.
884
 */
928
 */
885
static const int latency_ns = 5000;
929
static const int pessimal_latency_ns = 5000;
Line 886... Line 930...
886
 
930
 
887
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
931
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
888
{
932
{
889
	struct drm_i915_private *dev_priv = dev->dev_private;
933
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 1009... Line 1053...
1009
	.max_wm = I915_MAX_WM,
1053
	.max_wm = I915_MAX_WM,
1010
	.default_wm = 1,
1054
	.default_wm = 1,
1011
	.guard_size = 2,
1055
	.guard_size = 2,
1012
	.cacheline_size = I915_FIFO_LINE_SIZE,
1056
	.cacheline_size = I915_FIFO_LINE_SIZE,
1013
};
1057
};
1014
static const struct intel_watermark_params i830_wm_info = {
1058
static const struct intel_watermark_params i830_a_wm_info = {
1015
	.fifo_size = I855GM_FIFO_SIZE,
1059
	.fifo_size = I855GM_FIFO_SIZE,
1016
	.max_wm = I915_MAX_WM,
1060
	.max_wm = I915_MAX_WM,
1017
	.default_wm = 1,
1061
	.default_wm = 1,
1018
	.guard_size = 2,
1062
	.guard_size = 2,
1019
	.cacheline_size = I830_FIFO_LINE_SIZE,
1063
	.cacheline_size = I830_FIFO_LINE_SIZE,
1020
};
1064
};
-
 
1065
static const struct intel_watermark_params i830_bc_wm_info = {
-
 
1066
	.fifo_size = I855GM_FIFO_SIZE,
-
 
1067
	.max_wm = I915_MAX_WM/2,
-
 
1068
	.default_wm = 1,
-
 
1069
	.guard_size = 2,
-
 
1070
	.cacheline_size = I830_FIFO_LINE_SIZE,
-
 
1071
};
1021
static const struct intel_watermark_params i845_wm_info = {
1072
static const struct intel_watermark_params i845_wm_info = {
1022
	.fifo_size = I830_FIFO_SIZE,
1073
	.fifo_size = I830_FIFO_SIZE,
1023
	.max_wm = I915_MAX_WM,
1074
	.max_wm = I915_MAX_WM,
1024
	.default_wm = 1,
1075
	.default_wm = 1,
1025
	.guard_size = 2,
1076
	.guard_size = 2,
Line 1071... Line 1122...
1071
	/* Don't promote wm_size to unsigned... */
1122
	/* Don't promote wm_size to unsigned... */
1072
	if (wm_size > (long)wm->max_wm)
1123
	if (wm_size > (long)wm->max_wm)
1073
		wm_size = wm->max_wm;
1124
		wm_size = wm->max_wm;
1074
	if (wm_size <= 0)
1125
	if (wm_size <= 0)
1075
		wm_size = wm->default_wm;
1126
		wm_size = wm->default_wm;
-
 
1127
 
-
 
1128
	/*
-
 
1129
	 * Bspec seems to indicate that the value shouldn't be lower than
-
 
1130
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
-
 
1131
	 * Lets go for 8 which is the burst size since certain platforms
-
 
1132
	 * already use a hardcoded 8 (which is what the spec says should be
-
 
1133
	 * done).
-
 
1134
	 */
-
 
1135
	if (wm_size <= 8)
-
 
1136
		wm_size = 8;
-
 
1137
 
1076
	return wm_size;
1138
	return wm_size;
1077
}
1139
}
Line 1078... Line 1140...
1078
 
1140
 
1079
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
1141
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
Line 1295... Line 1357...
1295
	return g4x_check_srwm(dev,
1357
	return g4x_check_srwm(dev,
1296
			      *display_wm, *cursor_wm,
1358
			      *display_wm, *cursor_wm,
1297
			      display, cursor);
1359
			      display, cursor);
1298
}
1360
}
Line 1299... Line 1361...
1299
 
1361
 
1300
static bool vlv_compute_drain_latency(struct drm_device *dev,
1362
static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
1301
				     int plane,
1363
				      int pixel_size,
1302
				     int *plane_prec_mult,
1364
				      int *prec_mult,
1303
				     int *plane_dl,
-
 
1304
				     int *cursor_prec_mult,
-
 
1305
				     int *cursor_dl)
1365
				      int *drain_latency)
1306
{
1366
{
1307
	struct drm_crtc *crtc;
-
 
1308
	int clock, pixel_size;
1367
	struct drm_device *dev = crtc->dev;
-
 
1368
	int entries;
Line 1309... Line -...
1309
	int entries;
-
 
1310
 
1369
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1311
	crtc = intel_get_crtc_for_plane(dev, plane);
1370
 
Line 1312... Line 1371...
1312
	if (!intel_crtc_active(crtc))
1371
	if (WARN(clock == 0, "Pixel clock is zero!\n"))
1313
		return false;
1372
		return false;
Line 1314... Line 1373...
1314
 
1373
 
-
 
1374
	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
1315
	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1375
		return false;
-
 
1376
 
-
 
1377
	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
1316
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */
1378
	if (IS_CHERRYVIEW(dev))
-
 
1379
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
1317
 
1380
					       DRAIN_LATENCY_PRECISION_16;
1318
	entries = (clock / 1000) * pixel_size;
1381
	else
1319
	*plane_prec_mult = (entries > 128) ?
-
 
1320
		DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
1382
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
1321
	*plane_dl = (64 * (*plane_prec_mult) * 4) / entries;
1383
					       DRAIN_LATENCY_PRECISION_32;
1322
 
-
 
Line 1323... Line 1384...
1323
	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
1384
	*drain_latency = (64 * (*prec_mult) * 4) / entries;
1324
	*cursor_prec_mult = (entries > 128) ?
1385
 
Line 1325... Line 1386...
1325
		DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
1386
	if (*drain_latency > DRAIN_LATENCY_MASK)
Line 1334... Line 1395...
1334
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
1395
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
1335
 * to be programmed. Each plane has a drain latency multiplier and a drain
1396
 * to be programmed. Each plane has a drain latency multiplier and a drain
1336
 * latency value.
1397
 * latency value.
1337
 */
1398
 */
Line 1338... Line 1399...
1338
 
1399
 
1339
static void vlv_update_drain_latency(struct drm_device *dev)
1400
static void vlv_update_drain_latency(struct drm_crtc *crtc)
-
 
1401
{
1340
{
1402
	struct drm_device *dev = crtc->dev;
1341
	struct drm_i915_private *dev_priv = dev->dev_private;
1403
	struct drm_i915_private *dev_priv = dev->dev_private;
1342
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
-
 
1343
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
-
 
1344
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
1404
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1345
							either 16 or 32 */
-
 
1346
 
1405
	int pixel_size;
1347
	/* For plane A, Cursor A */
1406
	int drain_latency;
1348
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1407
	enum pipe pipe = intel_crtc->pipe;
1349
				      &cursor_prec_mult, &cursora_dl)) {
-
 
1350
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
-
 
1351
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64;
1408
	int plane_prec, prec_mult, plane_dl;
1352
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1409
	const int high_precision = IS_CHERRYVIEW(dev) ?
1353
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64;
-
 
1354
 
-
 
1355
		I915_WRITE(VLV_DDL1, cursora_prec |
-
 
1356
				(cursora_dl << DDL_CURSORA_SHIFT) |
-
 
1357
				planea_prec | planea_dl);
-
 
1358
	}
1410
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
1359
 
-
 
1360
	/* For plane B, Cursor B */
-
 
1361
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
-
 
1362
				      &cursor_prec_mult, &cursorb_dl)) {
1411
 
1363
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1412
	plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
1364
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64;
-
 
1365
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1413
		   DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
1366
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64;
1414
		   (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
1367
 
1415
 
1368
		I915_WRITE(VLV_DDL2, cursorb_prec |
1416
	if (!intel_crtc_active(crtc)) {
1369
				(cursorb_dl << DDL_CURSORB_SHIFT) |
1417
		I915_WRITE(VLV_DDL(pipe), plane_dl);
1370
				planeb_prec | planeb_dl);
1418
		return;
-
 
1419
	}
-
 
1420
 
-
 
1421
	/* Primary plane Drain Latency */
-
 
1422
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */
-
 
1423
	if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
-
 
1424
		plane_prec = (prec_mult == high_precision) ?
-
 
1425
					   DDL_PLANE_PRECISION_HIGH :
-
 
1426
					   DDL_PLANE_PRECISION_LOW;
-
 
1427
		plane_dl |= plane_prec | drain_latency;
-
 
1428
	}
-
 
1429
 
-
 
1430
	/* Cursor Drain Latency
-
 
1431
	 * BPP is always 4 for cursor
-
 
1432
	 */
-
 
1433
	pixel_size = 4;
-
 
1434
 
-
 
1435
	/* Program cursor DL only if it is enabled */
-
 
1436
	if (intel_crtc->cursor_base &&
-
 
1437
	    vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
-
 
1438
		plane_prec = (prec_mult == high_precision) ?
-
 
1439
					   DDL_CURSOR_PRECISION_HIGH :
-
 
1440
					   DDL_CURSOR_PRECISION_LOW;
-
 
1441
		plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
-
 
1442
	}
-
 
1443
 
1371
	}
1444
	I915_WRITE(VLV_DDL(pipe), plane_dl);
Line 1372... Line 1445...
1372
}
1445
}
Line 1373... Line 1446...
1373
 
1446
 
Line 1382... Line 1455...
1382
	int plane_sr, cursor_sr;
1455
	int plane_sr, cursor_sr;
1383
	int ignore_plane_sr, ignore_cursor_sr;
1456
	int ignore_plane_sr, ignore_cursor_sr;
1384
	unsigned int enabled = 0;
1457
	unsigned int enabled = 0;
1385
	bool cxsr_enabled;
1458
	bool cxsr_enabled;
Line 1386... Line 1459...
1386
 
1459
 
Line 1387... Line 1460...
1387
	vlv_update_drain_latency(dev);
1460
	vlv_update_drain_latency(crtc);
1388
 
1461
 
1389
	if (g4x_compute_wm0(dev, PIPE_A,
1462
	if (g4x_compute_wm0(dev, PIPE_A,
1390
			    &valleyview_wm_info, latency_ns,
1463
			    &valleyview_wm_info, pessimal_latency_ns,
1391
			    &valleyview_cursor_wm_info, latency_ns,
1464
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
Line 1392... Line 1465...
1392
			    &planea_wm, &cursora_wm))
1465
			    &planea_wm, &cursora_wm))
1393
		enabled |= 1 << PIPE_A;
1466
		enabled |= 1 << PIPE_A;
1394
 
1467
 
1395
	if (g4x_compute_wm0(dev, PIPE_B,
1468
	if (g4x_compute_wm0(dev, PIPE_B,
1396
			    &valleyview_wm_info, latency_ns,
1469
			    &valleyview_wm_info, pessimal_latency_ns,
Line 1397... Line 1470...
1397
			    &valleyview_cursor_wm_info, latency_ns,
1470
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
1398
			    &planeb_wm, &cursorb_wm))
1471
			    &planeb_wm, &cursorb_wm))
Line 1414... Line 1487...
1414
		cxsr_enabled = false;
1487
		cxsr_enabled = false;
1415
		intel_set_memory_cxsr(dev_priv, false);
1488
		intel_set_memory_cxsr(dev_priv, false);
1416
		plane_sr = cursor_sr = 0;
1489
		plane_sr = cursor_sr = 0;
1417
	}
1490
	}
Line 1418... Line 1491...
1418
 
1491
 
-
 
1492
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
1419
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1493
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1420
		      planea_wm, cursora_wm,
1494
		      planea_wm, cursora_wm,
1421
		      planeb_wm, cursorb_wm,
1495
		      planeb_wm, cursorb_wm,
Line 1422... Line 1496...
1422
		      plane_sr, cursor_sr);
1496
		      plane_sr, cursor_sr);
1423
 
1497
 
1424
	I915_WRITE(DSPFW1,
1498
	I915_WRITE(DSPFW1,
1425
		   (plane_sr << DSPFW_SR_SHIFT) |
1499
		   (plane_sr << DSPFW_SR_SHIFT) |
1426
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1500
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1427
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
1501
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
1428
		   planea_wm);
1502
		   (planea_wm << DSPFW_PLANEA_SHIFT));
1429
	I915_WRITE(DSPFW2,
1503
	I915_WRITE(DSPFW2,
1430
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1504
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1431
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
1505
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
Line 1435... Line 1509...
1435
 
1509
 
1436
	if (cxsr_enabled)
1510
	if (cxsr_enabled)
1437
		intel_set_memory_cxsr(dev_priv, true);
1511
		intel_set_memory_cxsr(dev_priv, true);
Line -... Line 1512...
-
 
1512
}
-
 
1513
 
-
 
1514
static void cherryview_update_wm(struct drm_crtc *crtc)
-
 
1515
{
-
 
1516
	struct drm_device *dev = crtc->dev;
-
 
1517
	static const int sr_latency_ns = 12000;
-
 
1518
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1519
	int planea_wm, planeb_wm, planec_wm;
-
 
1520
	int cursora_wm, cursorb_wm, cursorc_wm;
-
 
1521
	int plane_sr, cursor_sr;
-
 
1522
	int ignore_plane_sr, ignore_cursor_sr;
-
 
1523
	unsigned int enabled = 0;
-
 
1524
	bool cxsr_enabled;
-
 
1525
 
-
 
1526
	vlv_update_drain_latency(crtc);
-
 
1527
 
-
 
1528
	if (g4x_compute_wm0(dev, PIPE_A,
-
 
1529
			    &valleyview_wm_info, pessimal_latency_ns,
-
 
1530
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
-
 
1531
			    &planea_wm, &cursora_wm))
-
 
1532
		enabled |= 1 << PIPE_A;
-
 
1533
 
-
 
1534
	if (g4x_compute_wm0(dev, PIPE_B,
-
 
1535
			    &valleyview_wm_info, pessimal_latency_ns,
-
 
1536
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
-
 
1537
			    &planeb_wm, &cursorb_wm))
-
 
1538
		enabled |= 1 << PIPE_B;
-
 
1539
 
-
 
1540
	if (g4x_compute_wm0(dev, PIPE_C,
-
 
1541
			    &valleyview_wm_info, pessimal_latency_ns,
-
 
1542
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
-
 
1543
			    &planec_wm, &cursorc_wm))
-
 
1544
		enabled |= 1 << PIPE_C;
-
 
1545
 
-
 
1546
	if (single_plane_enabled(enabled) &&
-
 
1547
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
-
 
1548
			     sr_latency_ns,
-
 
1549
			     &valleyview_wm_info,
-
 
1550
			     &valleyview_cursor_wm_info,
-
 
1551
			     &plane_sr, &ignore_cursor_sr) &&
-
 
1552
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
-
 
1553
			     2*sr_latency_ns,
-
 
1554
			     &valleyview_wm_info,
-
 
1555
			     &valleyview_cursor_wm_info,
-
 
1556
			     &ignore_plane_sr, &cursor_sr)) {
-
 
1557
		cxsr_enabled = true;
-
 
1558
	} else {
-
 
1559
		cxsr_enabled = false;
-
 
1560
		intel_set_memory_cxsr(dev_priv, false);
-
 
1561
		plane_sr = cursor_sr = 0;
-
 
1562
	}
-
 
1563
 
-
 
1564
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
-
 
1565
		      "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
-
 
1566
		      "SR: plane=%d, cursor=%d\n",
-
 
1567
		      planea_wm, cursora_wm,
-
 
1568
		      planeb_wm, cursorb_wm,
-
 
1569
		      planec_wm, cursorc_wm,
-
 
1570
		      plane_sr, cursor_sr);
-
 
1571
 
-
 
1572
	I915_WRITE(DSPFW1,
-
 
1573
		   (plane_sr << DSPFW_SR_SHIFT) |
-
 
1574
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
-
 
1575
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
-
 
1576
		   (planea_wm << DSPFW_PLANEA_SHIFT));
-
 
1577
	I915_WRITE(DSPFW2,
-
 
1578
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
-
 
1579
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
-
 
1580
	I915_WRITE(DSPFW3,
-
 
1581
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
-
 
1582
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
-
 
1583
	I915_WRITE(DSPFW9_CHV,
-
 
1584
		   (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
-
 
1585
					      DSPFW_CURSORC_MASK)) |
-
 
1586
		   (planec_wm << DSPFW_PLANEC_SHIFT) |
-
 
1587
		   (cursorc_wm << DSPFW_CURSORC_SHIFT));
-
 
1588
 
-
 
1589
	if (cxsr_enabled)
-
 
1590
		intel_set_memory_cxsr(dev_priv, true);
-
 
1591
}
-
 
1592
 
-
 
1593
static void valleyview_update_sprite_wm(struct drm_plane *plane,
-
 
1594
					struct drm_crtc *crtc,
-
 
1595
					uint32_t sprite_width,
-
 
1596
					uint32_t sprite_height,
-
 
1597
					int pixel_size,
-
 
1598
					bool enabled, bool scaled)
-
 
1599
{
-
 
1600
	struct drm_device *dev = crtc->dev;
-
 
1601
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
1602
	int pipe = to_intel_plane(plane)->pipe;
-
 
1603
	int sprite = to_intel_plane(plane)->plane;
-
 
1604
	int drain_latency;
-
 
1605
	int plane_prec;
-
 
1606
	int sprite_dl;
-
 
1607
	int prec_mult;
-
 
1608
	const int high_precision = IS_CHERRYVIEW(dev) ?
-
 
1609
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
-
 
1610
 
-
 
1611
	sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
-
 
1612
		    (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
-
 
1613
 
-
 
1614
	if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
-
 
1615
						 &drain_latency)) {
-
 
1616
		plane_prec = (prec_mult == high_precision) ?
-
 
1617
					   DDL_SPRITE_PRECISION_HIGH(sprite) :
-
 
1618
					   DDL_SPRITE_PRECISION_LOW(sprite);
-
 
1619
		sprite_dl |= plane_prec |
-
 
1620
			     (drain_latency << DDL_SPRITE_SHIFT(sprite));
-
 
1621
	}
-
 
1622
 
-
 
1623
	I915_WRITE(VLV_DDL(pipe), sprite_dl);
1438
}
1624
}
1439
 
1625
 
1440
static void g4x_update_wm(struct drm_crtc *crtc)
1626
static void g4x_update_wm(struct drm_crtc *crtc)
1441
{
1627
{
1442
	struct drm_device *dev = crtc->dev;
1628
	struct drm_device *dev = crtc->dev;
Line 1446... Line 1632...
1446
	int plane_sr, cursor_sr;
1632
	int plane_sr, cursor_sr;
1447
	unsigned int enabled = 0;
1633
	unsigned int enabled = 0;
1448
	bool cxsr_enabled;
1634
	bool cxsr_enabled;
Line 1449... Line 1635...
1449
 
1635
 
1450
	if (g4x_compute_wm0(dev, PIPE_A,
1636
	if (g4x_compute_wm0(dev, PIPE_A,
1451
			    &g4x_wm_info, latency_ns,
1637
			    &g4x_wm_info, pessimal_latency_ns,
1452
			    &g4x_cursor_wm_info, latency_ns,
1638
			    &g4x_cursor_wm_info, pessimal_latency_ns,
1453
			    &planea_wm, &cursora_wm))
1639
			    &planea_wm, &cursora_wm))
Line 1454... Line 1640...
1454
		enabled |= 1 << PIPE_A;
1640
		enabled |= 1 << PIPE_A;
1455
 
1641
 
1456
	if (g4x_compute_wm0(dev, PIPE_B,
1642
	if (g4x_compute_wm0(dev, PIPE_B,
1457
			    &g4x_wm_info, latency_ns,
1643
			    &g4x_wm_info, pessimal_latency_ns,
1458
			    &g4x_cursor_wm_info, latency_ns,
1644
			    &g4x_cursor_wm_info, pessimal_latency_ns,
Line 1459... Line 1645...
1459
			    &planeb_wm, &cursorb_wm))
1645
			    &planeb_wm, &cursorb_wm))
1460
		enabled |= 1 << PIPE_B;
1646
		enabled |= 1 << PIPE_B;
Line 1470... Line 1656...
1470
		cxsr_enabled = false;
1656
		cxsr_enabled = false;
1471
		intel_set_memory_cxsr(dev_priv, false);
1657
		intel_set_memory_cxsr(dev_priv, false);
1472
		plane_sr = cursor_sr = 0;
1658
		plane_sr = cursor_sr = 0;
1473
	}
1659
	}
Line 1474... Line 1660...
1474
 
1660
 
-
 
1661
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
1475
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1662
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1476
		      planea_wm, cursora_wm,
1663
		      planea_wm, cursora_wm,
1477
		      planeb_wm, cursorb_wm,
1664
		      planeb_wm, cursorb_wm,
Line 1478... Line 1665...
1478
		      plane_sr, cursor_sr);
1665
		      plane_sr, cursor_sr);
1479
 
1666
 
1480
	I915_WRITE(DSPFW1,
1667
	I915_WRITE(DSPFW1,
1481
		   (plane_sr << DSPFW_SR_SHIFT) |
1668
		   (plane_sr << DSPFW_SR_SHIFT) |
1482
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1669
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1483
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
1670
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
1484
		   planea_wm);
1671
		   (planea_wm << DSPFW_PLANEA_SHIFT));
1485
	I915_WRITE(DSPFW2,
1672
	I915_WRITE(DSPFW2,
1486
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1673
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1487
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
1674
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
Line 1553... Line 1740...
1553
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1740
	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1554
		      srwm);
1741
		      srwm);
Line 1555... Line 1742...
1555
 
1742
 
1556
	/* 965 has limitations... */
1743
	/* 965 has limitations... */
-
 
1744
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
-
 
1745
		   (8 << DSPFW_CURSORB_SHIFT) |
1557
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1746
		   (8 << DSPFW_PLANEB_SHIFT) |
1558
		   (8 << 16) | (8 << 8) | (8 << 0));
1747
		   (8 << DSPFW_PLANEA_SHIFT));
-
 
1748
	I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
1559
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1749
		   (8 << DSPFW_PLANEC_SHIFT_OLD));
1560
	/* update cursor SR watermark */
1750
	/* update cursor SR watermark */
Line 1561... Line 1751...
1561
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1751
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1562
 
1752
 
Line 1579... Line 1769...
1579
	if (IS_I945GM(dev))
1769
	if (IS_I945GM(dev))
1580
		wm_info = &i945_wm_info;
1770
		wm_info = &i945_wm_info;
1581
	else if (!IS_GEN2(dev))
1771
	else if (!IS_GEN2(dev))
1582
		wm_info = &i915_wm_info;
1772
		wm_info = &i915_wm_info;
1583
	else
1773
	else
1584
		wm_info = &i830_wm_info;
1774
		wm_info = &i830_a_wm_info;
Line 1585... Line 1775...
1585
 
1775
 
1586
	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1776
	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1587
	crtc = intel_get_crtc_for_plane(dev, 0);
1777
	crtc = intel_get_crtc_for_plane(dev, 0);
1588
	if (intel_crtc_active(crtc)) {
1778
	if (intel_crtc_active(crtc)) {
Line 1592... Line 1782...
1592
			cpp = 4;
1782
			cpp = 4;
Line 1593... Line 1783...
1593
 
1783
 
1594
		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1784
		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1595
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1785
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1596
					       wm_info, fifo_size, cpp,
1786
					       wm_info, fifo_size, cpp,
1597
					       latency_ns);
1787
					       pessimal_latency_ns);
1598
		enabled = crtc;
1788
		enabled = crtc;
1599
	} else
1789
	} else {
-
 
1790
		planea_wm = fifo_size - wm_info->guard_size;
-
 
1791
		if (planea_wm > (long)wm_info->max_wm)
-
 
1792
			planea_wm = wm_info->max_wm;
-
 
1793
	}
-
 
1794
 
-
 
1795
	if (IS_GEN2(dev))
Line 1600... Line 1796...
1600
		planea_wm = fifo_size - wm_info->guard_size;
1796
		wm_info = &i830_bc_wm_info;
1601
 
1797
 
1602
	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1798
	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1603
	crtc = intel_get_crtc_for_plane(dev, 1);
1799
	crtc = intel_get_crtc_for_plane(dev, 1);
Line 1608... Line 1804...
1608
			cpp = 4;
1804
			cpp = 4;
Line 1609... Line 1805...
1609
 
1805
 
1610
		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1806
		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1611
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1807
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1612
					       wm_info, fifo_size, cpp,
1808
					       wm_info, fifo_size, cpp,
1613
					       latency_ns);
1809
					       pessimal_latency_ns);
1614
		if (enabled == NULL)
1810
		if (enabled == NULL)
1615
			enabled = crtc;
1811
			enabled = crtc;
1616
		else
1812
		else
1617
			enabled = NULL;
1813
			enabled = NULL;
1618
	} else
1814
	} else {
-
 
1815
		planeb_wm = fifo_size - wm_info->guard_size;
-
 
1816
		if (planeb_wm > (long)wm_info->max_wm)
-
 
1817
			planeb_wm = wm_info->max_wm;
Line 1619... Line 1818...
1619
		planeb_wm = fifo_size - wm_info->guard_size;
1818
	}
Line 1620... Line 1819...
1620
 
1819
 
1621
	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1820
	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
Line 1701... Line 1900...
1701
 
1900
 
1702
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1901
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1703
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1902
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1704
				       &i845_wm_info,
1903
				       &i845_wm_info,
1705
				       dev_priv->display.get_fifo_size(dev, 0),
1904
				       dev_priv->display.get_fifo_size(dev, 0),
1706
				       4, latency_ns);
1905
				       4, pessimal_latency_ns);
1707
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1906
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
Line 1708... Line 1907...
1708
	fwater_lo |= (3<<8) | planea_wm;
1907
	fwater_lo |= (3<<8) | planea_wm;
Line 1778... Line 1977...
1778
			   uint8_t bytes_per_pixel)
1977
			   uint8_t bytes_per_pixel)
1779
{
1978
{
1780
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
1979
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
1781
}
1980
}
Line -... Line 1981...
-
 
1981
 
-
 
1982
struct skl_pipe_wm_parameters {
-
 
1983
	bool active;
-
 
1984
	uint32_t pipe_htotal;
-
 
1985
	uint32_t pixel_rate; /* in KHz */
-
 
1986
	struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
-
 
1987
	struct intel_plane_wm_parameters cursor;
-
 
1988
};
1782
 
1989
 
1783
struct ilk_pipe_wm_parameters {
1990
struct ilk_pipe_wm_parameters {
1784
	bool active;
1991
	bool active;
1785
	uint32_t pipe_htotal;
1992
	uint32_t pipe_htotal;
1786
	uint32_t pixel_rate;
1993
	uint32_t pixel_rate;
Line 2089... Line 2296...
2089
 
2296
 
2090
	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2297
	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2091
	       PIPE_WM_LINETIME_TIME(linetime);
2298
	       PIPE_WM_LINETIME_TIME(linetime);
Line 2092... Line 2299...
2092
}
2299
}
2093
 
2300
 
2094
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2301
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
Line -... Line 2302...
-
 
2302
{
-
 
2303
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2304
 
-
 
2305
	if (IS_GEN9(dev)) {
-
 
2306
		uint32_t val;
-
 
2307
		int ret, i;
-
 
2308
		int level, max_level = ilk_wm_max_level(dev);
-
 
2309
 
-
 
2310
		/* read the first set of memory latencies[0:3] */
-
 
2311
		val = 0; /* data0 to be programmed to 0 for first set */
-
 
2312
		mutex_lock(&dev_priv->rps.hw_lock);
-
 
2313
		ret = sandybridge_pcode_read(dev_priv,
-
 
2314
					     GEN9_PCODE_READ_MEM_LATENCY,
-
 
2315
					     &val);
-
 
2316
		mutex_unlock(&dev_priv->rps.hw_lock);
-
 
2317
 
-
 
2318
		if (ret) {
-
 
2319
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
-
 
2320
			return;
-
 
2321
		}
-
 
2322
 
-
 
2323
		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
-
 
2324
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
-
 
2325
				GEN9_MEM_LATENCY_LEVEL_MASK;
-
 
2326
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
-
 
2327
				GEN9_MEM_LATENCY_LEVEL_MASK;
-
 
2328
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
-
 
2329
				GEN9_MEM_LATENCY_LEVEL_MASK;
-
 
2330
 
-
 
2331
		/* read the second set of memory latencies[4:7] */
-
 
2332
		val = 1; /* data0 to be programmed to 1 for second set */
-
 
2333
		mutex_lock(&dev_priv->rps.hw_lock);
-
 
2334
		ret = sandybridge_pcode_read(dev_priv,
-
 
2335
					     GEN9_PCODE_READ_MEM_LATENCY,
-
 
2336
					     &val);
-
 
2337
		mutex_unlock(&dev_priv->rps.hw_lock);
-
 
2338
		if (ret) {
-
 
2339
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
-
 
2340
			return;
-
 
2341
		}
-
 
2342
 
-
 
2343
		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
-
 
2344
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
-
 
2345
				GEN9_MEM_LATENCY_LEVEL_MASK;
-
 
2346
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
-
 
2347
				GEN9_MEM_LATENCY_LEVEL_MASK;
-
 
2348
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
-
 
2349
				GEN9_MEM_LATENCY_LEVEL_MASK;
-
 
2350
 
-
 
2351
		/*
-
 
2352
		 * punit doesn't take into account the read latency so we need
-
 
2353
		 * to add 2us to the various latency levels we retrieve from
-
 
2354
		 * the punit.
-
 
2355
		 *   - W0 is a bit special in that it's the only level that
-
 
2356
		 *   can't be disabled if we want to have display working, so
-
 
2357
		 *   we always add 2us there.
-
 
2358
		 *   - For levels >=1, punit returns 0us latency when they are
-
 
2359
		 *   disabled, so we respect that and don't add 2us then
-
 
2360
		 *
-
 
2361
		 * Additionally, if a level n (n > 1) has a 0us latency, all
-
 
2362
		 * levels m (m >= n) need to be disabled. We make sure to
-
 
2363
		 * sanitize the values out of the punit to satisfy this
-
 
2364
		 * requirement.
-
 
2365
		 */
-
 
2366
		wm[0] += 2;
-
 
2367
		for (level = 1; level <= max_level; level++)
-
 
2368
			if (wm[level] != 0)
-
 
2369
				wm[level] += 2;
-
 
2370
			else {
-
 
2371
				for (i = level + 1; i <= max_level; i++)
-
 
2372
					wm[i] = 0;
2095
{
2373
 
2096
	struct drm_i915_private *dev_priv = dev->dev_private;
2374
				break;
Line 2097... Line 2375...
2097
 
2375
			}
2098
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2376
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2099
		uint64_t sskpd = I915_READ64(MCH_SSKPD);
2377
		uint64_t sskpd = I915_READ64(MCH_SSKPD);
Line 2141... Line 2419...
2141
}
2419
}
Line 2142... Line 2420...
2142
 
2420
 
2143
int ilk_wm_max_level(const struct drm_device *dev)
2421
int ilk_wm_max_level(const struct drm_device *dev)
2144
{
2422
{
-
 
2423
	/* how many WM levels are we expecting */
-
 
2424
	if (IS_GEN9(dev))
2145
	/* how many WM levels are we expecting */
2425
		return 7;
2146
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2426
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2147
		return 4;
2427
		return 4;
2148
	else if (INTEL_INFO(dev)->gen >= 6)
2428
	else if (INTEL_INFO(dev)->gen >= 6)
2149
		return 3;
2429
		return 3;
2150
	else
2430
	else
2151
		return 2;
2431
		return 2;
Line 2152... Line 2432...
2152
}
2432
}
2153
 
2433
 
2154
static void intel_print_wm_latency(struct drm_device *dev,
2434
static void intel_print_wm_latency(struct drm_device *dev,
2155
				   const char *name,
2435
				   const char *name,
2156
				   const uint16_t wm[5])
2436
				   const uint16_t wm[8])
Line 2157... Line 2437...
2157
{
2437
{
2158
	int level, max_level = ilk_wm_max_level(dev);
2438
	int level, max_level = ilk_wm_max_level(dev);
Line 2164... Line 2444...
2164
			DRM_ERROR("%s WM%d latency not provided\n",
2444
			DRM_ERROR("%s WM%d latency not provided\n",
2165
				  name, level);
2445
				  name, level);
2166
			continue;
2446
			continue;
2167
		}
2447
		}
Line -... Line 2448...
-
 
2448
 
-
 
2449
		/*
2168
 
2450
		 * - latencies are in us on gen9.
-
 
2451
		 * - before then, WM1+ latency values are in 0.5us units
-
 
2452
		 */
-
 
2453
		if (IS_GEN9(dev))
2169
		/* WM1+ latency values in 0.5us units */
2454
			latency *= 10;
2170
		if (level > 0)
2455
		else if (level > 0)
Line 2171... Line 2456...
2171
			latency *= 5;
2456
			latency *= 5;
2172
 
2457
 
2173
		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2458
		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
Line 2233... Line 2518...
2233
 
2518
 
2234
	if (IS_GEN6(dev))
2519
	if (IS_GEN6(dev))
2235
		snb_wm_latency_quirk(dev);
2520
		snb_wm_latency_quirk(dev);
Line -... Line 2521...
-
 
2521
}
-
 
2522
 
-
 
2523
static void skl_setup_wm_latency(struct drm_device *dev)
-
 
2524
{
-
 
2525
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
2526
 
-
 
2527
	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
-
 
2528
	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2236
}
2529
}
2237
 
2530
 
2238
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2531
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2239
				      struct ilk_pipe_wm_parameters *p)
2532
				      struct ilk_pipe_wm_parameters *p)
2240
{
2533
{
Line 2554... Line 2847...
2554
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2847
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2555
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2848
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2556
#define WM_DIRTY_FBC (1 << 24)
2849
#define WM_DIRTY_FBC (1 << 24)
2557
#define WM_DIRTY_DDB (1 << 25)
2850
#define WM_DIRTY_DDB (1 << 25)
Line 2558... Line 2851...
2558
 
2851
 
2559
static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2852
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2560
					 const struct ilk_wm_values *old,
2853
					 const struct ilk_wm_values *old,
2561
					 const struct ilk_wm_values *new)
2854
					 const struct ilk_wm_values *new)
2562
{
2855
{
2563
	unsigned int dirty = 0;
2856
	unsigned int dirty = 0;
2564
	enum pipe pipe;
2857
	enum pipe pipe;
Line 2565... Line 2858...
2565
	int wm_lp;
2858
	int wm_lp;
2566
 
2859
 
2567
	for_each_pipe(pipe) {
2860
	for_each_pipe(dev_priv, pipe) {
2568
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2861
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2569
			dirty |= WM_DIRTY_LINETIME(pipe);
2862
			dirty |= WM_DIRTY_LINETIME(pipe);
2570
			/* Must disable LP1+ watermarks too */
2863
			/* Must disable LP1+ watermarks too */
Line 2648... Line 2941...
2648
	struct drm_device *dev = dev_priv->dev;
2941
	struct drm_device *dev = dev_priv->dev;
2649
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
2942
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
2650
	unsigned int dirty;
2943
	unsigned int dirty;
2651
	uint32_t val;
2944
	uint32_t val;
Line 2652... Line 2945...
2652
 
2945
 
2653
	dirty = ilk_compute_wm_dirty(dev, previous, results);
2946
	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2654
	if (!dirty)
2947
	if (!dirty)
Line 2655... Line 2948...
2655
		return;
2948
		return;
Line 2723... Line 3016...
2723
	struct drm_i915_private *dev_priv = dev->dev_private;
3016
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 2724... Line 3017...
2724
 
3017
 
2725
	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3018
	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
Line -... Line 3019...
-
 
3019
}
-
 
3020
 
-
 
3021
/*
-
 
3022
 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
-
 
3023
 * different active planes.
-
 
3024
 */
-
 
3025
 
-
 
3026
#define SKL_DDB_SIZE		896	/* in blocks */
-
 
3027
 
-
 
3028
static void
-
 
3029
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
-
 
3030
				   struct drm_crtc *for_crtc,
-
 
3031
				   const struct intel_wm_config *config,
-
 
3032
				   const struct skl_pipe_wm_parameters *params,
-
 
3033
				   struct skl_ddb_entry *alloc /* out */)
-
 
3034
{
-
 
3035
	struct drm_crtc *crtc;
-
 
3036
	unsigned int pipe_size, ddb_size;
-
 
3037
	int nth_active_pipe;
-
 
3038
 
-
 
3039
	if (!params->active) {
-
 
3040
		alloc->start = 0;
-
 
3041
		alloc->end = 0;
-
 
3042
		return;
-
 
3043
	}
-
 
3044
 
-
 
3045
	ddb_size = SKL_DDB_SIZE;
-
 
3046
 
-
 
3047
	ddb_size -= 4; /* 4 blocks for bypass path allocation */
-
 
3048
 
-
 
3049
	nth_active_pipe = 0;
-
 
3050
	for_each_crtc(dev, crtc) {
-
 
3051
		if (!intel_crtc_active(crtc))
-
 
3052
			continue;
-
 
3053
 
-
 
3054
		if (crtc == for_crtc)
-
 
3055
			break;
-
 
3056
 
-
 
3057
		nth_active_pipe++;
-
 
3058
	}
-
 
3059
 
-
 
3060
	pipe_size = ddb_size / config->num_pipes_active;
-
 
3061
	alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
-
 
3062
	alloc->end = alloc->start + pipe_size;
-
 
3063
}
-
 
3064
 
-
 
3065
static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
-
 
3066
{
-
 
3067
	if (config->num_pipes_active == 1)
-
 
3068
		return 32;
-
 
3069
 
-
 
3070
	return 8;
-
 
3071
}
-
 
3072
 
-
 
3073
static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
-
 
3074
{
-
 
3075
	entry->start = reg & 0x3ff;
-
 
3076
	entry->end = (reg >> 16) & 0x3ff;
-
 
3077
	if (entry->end)
-
 
3078
		entry->end += 1;
-
 
3079
}
-
 
3080
 
-
 
3081
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
-
 
3082
			  struct skl_ddb_allocation *ddb /* out */)
-
 
3083
{
-
 
3084
	struct drm_device *dev = dev_priv->dev;
-
 
3085
	enum pipe pipe;
-
 
3086
	int plane;
-
 
3087
	u32 val;
-
 
3088
 
-
 
3089
	for_each_pipe(dev_priv, pipe) {
-
 
3090
		for_each_plane(pipe, plane) {
-
 
3091
			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
-
 
3092
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
-
 
3093
						   val);
-
 
3094
		}
-
 
3095
 
-
 
3096
		val = I915_READ(CUR_BUF_CFG(pipe));
-
 
3097
		skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
-
 
3098
	}
-
 
3099
}
-
 
3100
 
-
 
3101
static unsigned int
-
 
3102
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
-
 
3103
{
-
 
3104
	return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
-
 
3105
}
-
 
3106
 
-
 
3107
/*
-
 
3108
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
-
 
3109
 * a 8192x4096@32bpp framebuffer:
-
 
3110
 *   3 * 4096 * 8192  * 4 < 2^32
-
 
3111
 */
-
 
3112
static unsigned int
-
 
3113
skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
-
 
3114
				 const struct skl_pipe_wm_parameters *params)
-
 
3115
{
-
 
3116
	unsigned int total_data_rate = 0;
-
 
3117
	int plane;
-
 
3118
 
-
 
3119
	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
-
 
3120
		const struct intel_plane_wm_parameters *p;
-
 
3121
 
-
 
3122
		p = ¶ms->plane[plane];
-
 
3123
		if (!p->enabled)
-
 
3124
			continue;
-
 
3125
 
-
 
3126
		total_data_rate += skl_plane_relative_data_rate(p);
-
 
3127
	}
-
 
3128
 
-
 
3129
	return total_data_rate;
-
 
3130
}
-
 
3131
 
-
 
3132
static void
-
 
3133
skl_allocate_pipe_ddb(struct drm_crtc *crtc,
-
 
3134
		      const struct intel_wm_config *config,
-
 
3135
		      const struct skl_pipe_wm_parameters *params,
-
 
3136
		      struct skl_ddb_allocation *ddb /* out */)
-
 
3137
{
-
 
3138
	struct drm_device *dev = crtc->dev;
-
 
3139
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
3140
	enum pipe pipe = intel_crtc->pipe;
-
 
3141
	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
-
 
3142
	uint16_t alloc_size, start, cursor_blocks;
-
 
3143
	unsigned int total_data_rate;
-
 
3144
	int plane;
-
 
3145
 
-
 
3146
	skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
-
 
3147
	alloc_size = skl_ddb_entry_size(alloc);
-
 
3148
	if (alloc_size == 0) {
-
 
3149
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
-
 
3150
		memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
-
 
3151
		return;
-
 
3152
	}
-
 
3153
 
-
 
3154
	cursor_blocks = skl_cursor_allocation(config);
-
 
3155
	ddb->cursor[pipe].start = alloc->end - cursor_blocks;
-
 
3156
	ddb->cursor[pipe].end = alloc->end;
-
 
3157
 
-
 
3158
	alloc_size -= cursor_blocks;
-
 
3159
	alloc->end -= cursor_blocks;
-
 
3160
 
-
 
3161
		/*
-
 
3162
	 * Each active plane get a portion of the remaining space, in
-
 
3163
	 * proportion to the amount of data they need to fetch from memory.
-
 
3164
	 *
-
 
3165
	 * FIXME: we may not allocate every single block here.
-
 
3166
		 */
-
 
3167
	total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
-
 
3168
 
-
 
3169
	start = alloc->start;
-
 
3170
	for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
-
 
3171
		const struct intel_plane_wm_parameters *p;
-
 
3172
		unsigned int data_rate;
-
 
3173
		uint16_t plane_blocks;
-
 
3174
 
-
 
3175
		p = ¶ms->plane[plane];
-
 
3176
		if (!p->enabled)
-
 
3177
			continue;
-
 
3178
 
-
 
3179
		data_rate = skl_plane_relative_data_rate(p);
-
 
3180
 
-
 
3181
		/*
-
 
3182
		 * promote the expression to 64 bits to avoid overflowing, the
-
 
3183
		 * result is < available as data_rate / total_data_rate < 1
-
 
3184
		 */
-
 
3185
		plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
-
 
3186
				       total_data_rate);
-
 
3187
 
-
 
3188
		ddb->plane[pipe][plane].start = start;
-
 
3189
		ddb->plane[pipe][plane].end = start + plane_blocks;
-
 
3190
 
-
 
3191
		start += plane_blocks;
-
 
3192
	}
-
 
3193
 
-
 
3194
}
-
 
3195
 
-
 
3196
static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
-
 
3197
{
-
 
3198
	/* TODO: Take into account the scalers once we support them */
-
 
3199
	return config->adjusted_mode.crtc_clock;
-
 
3200
}
-
 
3201
 
-
 
3202
/*
-
 
3203
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
-
 
3204
 * for the read latency) and bytes_per_pixel should always be <= 8, so that
-
 
3205
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
-
 
3206
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
-
 
3207
*/
-
 
3208
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
-
 
3209
			       uint32_t latency)
-
 
3210
{
-
 
3211
	uint32_t wm_intermediate_val, ret;
-
 
3212
 
-
 
3213
	if (latency == 0)
-
 
3214
		return UINT_MAX;
-
 
3215
 
-
 
3216
	wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
-
 
3217
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
-
 
3218
 
-
 
3219
	return ret;
-
 
3220
}
-
 
3221
 
-
 
3222
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
-
 
3223
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
-
 
3224
			       uint32_t latency)
-
 
3225
{
-
 
3226
	uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
-
 
3227
 
-
 
3228
	if (latency == 0)
-
 
3229
		return UINT_MAX;
-
 
3230
 
-
 
3231
	plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
-
 
3232
	wm_intermediate_val = latency * pixel_rate;
-
 
3233
	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
-
 
3234
				plane_bytes_per_line;
-
 
3235
 
-
 
3236
	return ret;
-
 
3237
}
-
 
3238
 
-
 
3239
static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
-
 
3240
				       const struct intel_crtc *intel_crtc)
-
 
3241
{
-
 
3242
	struct drm_device *dev = intel_crtc->base.dev;
-
 
3243
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3244
	const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
-
 
3245
	enum pipe pipe = intel_crtc->pipe;
-
 
3246
 
-
 
3247
	if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
-
 
3248
		   sizeof(new_ddb->plane[pipe])))
-
 
3249
		return true;
-
 
3250
 
-
 
3251
	if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
-
 
3252
		    sizeof(new_ddb->cursor[pipe])))
-
 
3253
		return true;
-
 
3254
 
-
 
3255
	return false;
-
 
3256
}
-
 
3257
 
-
 
3258
static void skl_compute_wm_global_parameters(struct drm_device *dev,
-
 
3259
					     struct intel_wm_config *config)
-
 
3260
{
-
 
3261
	struct drm_crtc *crtc;
-
 
3262
	struct drm_plane *plane;
-
 
3263
 
-
 
3264
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-
 
3265
		config->num_pipes_active += intel_crtc_active(crtc);
-
 
3266
 
-
 
3267
	/* FIXME: I don't think we need those two global parameters on SKL */
-
 
3268
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-
 
3269
		struct intel_plane *intel_plane = to_intel_plane(plane);
-
 
3270
 
-
 
3271
		config->sprites_enabled |= intel_plane->wm.enabled;
-
 
3272
		config->sprites_scaled |= intel_plane->wm.scaled;
-
 
3273
	}
-
 
3274
}
-
 
3275
 
-
 
3276
static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
-
 
3277
					   struct skl_pipe_wm_parameters *p)
-
 
3278
{
-
 
3279
	struct drm_device *dev = crtc->dev;
-
 
3280
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
 
3281
	enum pipe pipe = intel_crtc->pipe;
-
 
3282
	struct drm_plane *plane;
-
 
3283
	int i = 1; /* Index for sprite planes start */
-
 
3284
 
-
 
3285
	p->active = intel_crtc_active(crtc);
-
 
3286
	if (p->active) {
-
 
3287
		p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
-
 
3288
		p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
-
 
3289
 
-
 
3290
		/*
-
 
3291
		 * For now, assume primary and cursor planes are always enabled.
-
 
3292
		 */
-
 
3293
		p->plane[0].enabled = true;
-
 
3294
		p->plane[0].bytes_per_pixel =
-
 
3295
			crtc->primary->fb->bits_per_pixel / 8;
-
 
3296
		p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
-
 
3297
		p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
-
 
3298
 
-
 
3299
		p->cursor.enabled = true;
-
 
3300
		p->cursor.bytes_per_pixel = 4;
-
 
3301
		p->cursor.horiz_pixels = intel_crtc->cursor_width ?
-
 
3302
					 intel_crtc->cursor_width : 64;
-
 
3303
	}
-
 
3304
 
-
 
3305
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
-
 
3306
		struct intel_plane *intel_plane = to_intel_plane(plane);
-
 
3307
 
-
 
3308
		if (intel_plane->pipe == pipe)
-
 
3309
			p->plane[i++] = intel_plane->wm;
-
 
3310
	}
-
 
3311
}
-
 
3312
 
-
 
3313
static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
-
 
3314
				 struct intel_plane_wm_parameters *p_params,
-
 
3315
				 uint16_t ddb_allocation,
-
 
3316
				 uint32_t mem_value,
-
 
3317
				 uint16_t *out_blocks, /* out */
-
 
3318
				 uint8_t *out_lines /* out */)
-
 
3319
{
-
 
3320
	uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
-
 
3321
	uint32_t result_bytes;
-
 
3322
 
-
 
3323
	if (mem_value == 0 || !p->active || !p_params->enabled)
-
 
3324
		return false;
-
 
3325
 
-
 
3326
	method1 = skl_wm_method1(p->pixel_rate,
-
 
3327
				 p_params->bytes_per_pixel,
-
 
3328
				 mem_value);
-
 
3329
	method2 = skl_wm_method2(p->pixel_rate,
-
 
3330
				 p->pipe_htotal,
-
 
3331
				 p_params->horiz_pixels,
-
 
3332
				 p_params->bytes_per_pixel,
-
 
3333
				 mem_value);
-
 
3334
 
-
 
3335
	plane_bytes_per_line = p_params->horiz_pixels *
-
 
3336
					p_params->bytes_per_pixel;
-
 
3337
 
-
 
3338
	/* For now xtile and linear */
-
 
3339
	if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
-
 
3340
		result_bytes = min(method1, method2);
-
 
3341
	else
-
 
3342
		result_bytes = method1;
-
 
3343
 
-
 
3344
	res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
-
 
3345
	res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
-
 
3346
 
-
 
3347
	if (res_blocks > ddb_allocation || res_lines > 31)
-
 
3348
		return false;
-
 
3349
 
-
 
3350
	*out_blocks = res_blocks;
-
 
3351
	*out_lines = res_lines;
-
 
3352
 
-
 
3353
	return true;
-
 
3354
}

static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
				 struct skl_ddb_allocation *ddb,
				 struct skl_pipe_wm_parameters *p,
				 enum pipe pipe,
				 int level,
				 int num_planes,
				 struct skl_wm_level *result)
{
	uint16_t latency = dev_priv->wm.skl_latency[level];
	uint16_t ddb_blocks;
	int i;

	for (i = 0; i < num_planes; i++) {
		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);

		result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
						ddb_blocks,
						latency,
						&result->plane_res_b[i],
						&result->plane_res_l[i]);
	}

	ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
	result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
						 latency, &result->cursor_res_b,
						 &result->cursor_res_l);
}

static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
{
	if (!intel_crtc_active(crtc))
		return 0;

	return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
}

static void skl_compute_transition_wm(struct drm_crtc *crtc,
				      struct skl_pipe_wm_parameters *params,
				      struct skl_wm_level *trans_wm /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int i;

	if (!params->active)
		return;

	/* Until we know more, just disable transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++)
		trans_wm->plane_en[i] = false;
	trans_wm->cursor_en = false;
}

static void skl_compute_pipe_wm(struct drm_crtc *crtc,
				struct skl_ddb_allocation *ddb,
				struct skl_pipe_wm_parameters *params,
				struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
				     level, intel_num_planes(intel_crtc),
				     &pipe_wm->wm[level]);
	}
	pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);

	skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
}

static void skl_compute_wm_results(struct drm_device *dev,
				   struct skl_pipe_wm_parameters *p,
				   struct skl_pipe_wm *p_wm,
				   struct skl_wm_values *r,
				   struct intel_crtc *intel_crtc)
{
	int level, max_level = ilk_wm_max_level(dev);
	enum pipe pipe = intel_crtc->pipe;
	uint32_t temp;
	int i;

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;

			temp |= p_wm->wm[level].plane_res_l[i] <<
					PLANE_WM_LINES_SHIFT;
			temp |= p_wm->wm[level].plane_res_b[i];
			if (p_wm->wm[level].plane_en[i])
				temp |= PLANE_WM_EN;

			r->plane[pipe][i][level] = temp;
		}

		temp = 0;

		temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->wm[level].cursor_res_b;

		if (p_wm->wm[level].cursor_en)
			temp |= PLANE_WM_EN;

		r->cursor[pipe][level] = temp;
	}

	/* transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = 0;
		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->trans_wm.plane_res_b[i];
		if (p_wm->trans_wm.plane_en[i])
			temp |= PLANE_WM_EN;

		r->plane_trans[pipe][i] = temp;
	}

	temp = 0;
	temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
	temp |= p_wm->trans_wm.cursor_res_b;
	if (p_wm->trans_wm.cursor_en)
		temp |= PLANE_WM_EN;

	r->cursor_trans[pipe] = temp;

	r->wm_linetime[pipe] = p_wm->linetime;
}
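/*
 * Illustrative sketch (not part of the driver): the register packing done
 * by skl_compute_wm_results above, round-tripped in user space. The shift
 * and mask values below are assumptions modelled on the i915 definitions
 * (PLANE_WM_EN, PLANE_WM_LINES_SHIFT, PLANE_WM_BLOCKS_MASK); see
 * i915_reg.h for the authoritative values.
 */
#include <stdint.h>
#include <stdio.h>

#define WM_EN		(1u << 31)
#define WM_LINES_SHIFT	14
#define WM_LINES_MASK	0x1f
#define WM_BLOCKS_MASK	0x3ff

static uint32_t pack_wm(uint16_t blocks, uint8_t lines, int enable)
{
	uint32_t v = 0;

	v |= (uint32_t)(lines & WM_LINES_MASK) << WM_LINES_SHIFT;
	v |= blocks & WM_BLOCKS_MASK;
	if (enable)
		v |= WM_EN;
	return v;
}

int main(void)
{
	uint32_t v = pack_wm(7, 1, 1);

	/* the unpacking mirrors skl_pipe_wm_active_state later in the file */
	printf("raw=%#x blocks=%u lines=%u en=%d\n", v,
	       v & WM_BLOCKS_MASK,
	       (v >> WM_LINES_SHIFT) & WM_LINES_MASK,
	       (v & WM_EN) != 0);
	return 0;
}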

static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}
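/*
 * Illustrative sketch (not part of the driver): the (end - 1) << 16 | start
 * encoding used by skl_ddb_entry_write above, plus a matching decode. The
 * point is the convention: end is exclusive in software but inclusive in
 * the register, and end == 0 means "disabled, write all zeroes".
 */
#include <stdint.h>
#include <stdio.h>

struct ddb_entry { uint16_t start, end; /* end is exclusive */ };

static uint32_t ddb_encode(const struct ddb_entry *e)
{
	if (!e->end)
		return 0;	/* disabled entry */
	return (uint32_t)(e->end - 1) << 16 | e->start;
}

static struct ddb_entry ddb_decode(uint32_t v)
{
	struct ddb_entry e = { 0, 0 };

	if (v) {
		e.start = v & 0xffff;	/* field widths are illustrative */
		e.end = (v >> 16) + 1;
	}
	return e;
}

int main(void)
{
	struct ddb_entry e = { .start = 0, .end = 160 };
	uint32_t raw = ddb_encode(&e);
	struct ddb_entry back = ddb_decode(raw);

	printf("raw=%#x start=%u end=%u\n", raw, back.start, back.end);
	return 0;
}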

static void skl_write_wm_values(struct drm_i915_private *dev_priv,
				const struct skl_wm_values *new)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		int i, level, max_level = ilk_wm_max_level(dev);
		enum pipe pipe = crtc->pipe;

		if (!new->dirty[pipe])
			continue;

		I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);

		for (level = 0; level <= max_level; level++) {
			for (i = 0; i < intel_num_planes(crtc); i++)
				I915_WRITE(PLANE_WM(pipe, i, level),
					   new->plane[pipe][i][level]);
			I915_WRITE(CUR_WM(pipe, level),
				   new->cursor[pipe][level]);
		}
		for (i = 0; i < intel_num_planes(crtc); i++)
			I915_WRITE(PLANE_WM_TRANS(pipe, i),
				   new->plane_trans[pipe][i]);
		I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);

		for (i = 0; i < intel_num_planes(crtc); i++)
			skl_ddb_entry_write(dev_priv,
					    PLANE_BUF_CFG(pipe, i),
					    &new->ddb.plane[pipe][i]);

		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
				    &new->ddb.cursor[pipe]);
	}
}

/*
 * When setting up a new DDB allocation arrangement, we need to correctly
 * sequence the times at which the new allocations for the pipes are taken into
 * account or we'll have pipes fetching from space previously allocated to
 * another pipe.
 *
 * Roughly the sequence looks like:
 *  1. re-allocate the pipe(s) with the allocation being reduced and not
 *     overlapping with a previous light-up pipe (another way to put it is:
 *     pipes with their new allocation strictly included in their old ones).
 *  2. re-allocate the other pipes that get their allocation reduced
 *  3. allocate the pipes having their allocation increased
 *
 * Steps 1. and 2. are here to take care of the following case:
 * - Initially DDB looks like this:
 *     |   B    |   C    |
 * - enable pipe A.
 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
 *   allocation
 *     |  A  |  B  |  C  |
 *
 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
 */

static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
	struct drm_device *dev = dev_priv->dev;
	int plane;

	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);

	for_each_plane(pipe, plane) {
		I915_WRITE(PLANE_SURF(pipe, plane),
			   I915_READ(PLANE_SURF(pipe, plane)));
	}
	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
}
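/*
 * Illustrative sketch (not part of the driver): the read-then-write of
 * PLANE_SURF to its own value above is a deliberate data no-op; the idea is
 * that posting the surface-address write re-arms the double-buffered plane
 * registers, so the new DDB/WM configuration is latched at the next vblank.
 * The pattern, abstracted (volatile keeps the compiler from eliding it):
 */
static inline void rearm_double_buffered(volatile uint32_t *reg)
{
	*reg = *reg;	/* value unchanged; the write itself triggers the latch */
}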

static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
			    const struct skl_ddb_allocation *new,
			    enum pipe pipe)
{
	uint16_t old_size, new_size;

	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
	new_size = skl_ddb_entry_size(&new->pipe[pipe]);

	return old_size != new_size &&
	       new->pipe[pipe].start >= old->pipe[pipe].start &&
	       new->pipe[pipe].end <= old->pipe[pipe].end;
}

static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
				struct skl_wm_values *new_values)
{
	struct drm_device *dev = dev_priv->dev;
	struct skl_ddb_allocation *cur_ddb, *new_ddb;
	bool reallocated[I915_MAX_PIPES] = {false, false, false};
	struct intel_crtc *crtc;
	enum pipe pipe;

	new_ddb = &new_values->ddb;
	cur_ddb = &dev_priv->wm.skl_hw.ddb;

	/*
	 * First pass: flush the pipes with the new allocation contained into
	 * the old space.
	 *
	 * We'll wait for the vblank on those pipes to ensure we can safely
	 * re-allocate the freed space without this pipe fetching from it.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 1);
		intel_wait_for_vblank(dev, pipe);

		reallocated[pipe] = true;
	}

	/*
	 * Second pass: flush the pipes that are having their allocation
	 * reduced, but overlapping with a previous allocation.
	 *
	 * Here as well we need to wait for the vblank to make sure the freed
	 * space is not used anymore.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (reallocated[pipe])
			continue;

		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
			skl_wm_flush_pipe(dev_priv, pipe, 2);
			intel_wait_for_vblank(dev, pipe);
		}

		reallocated[pipe] = true;
	}

	/*
	 * Third pass: flush the pipes that got more space allocated.
	 *
	 * We don't need to actively wait for the update here, next vblank
	 * will just get more DDB space with the correct WM values.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		/*
		 * At this point, only the pipes that got more space than
		 * before are left to re-allocate.
		 */
		if (reallocated[pipe])
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 3);
	}
}
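/*
 * Illustrative sketch (not part of the driver): the three-pass ordering
 * above, reduced to the classification decision for a single pipe. Given
 * the old and new allocation for one pipe, pick the flush pass exactly as
 * skl_flush_wm_values does: pass 1 for shrunk-and-contained, pass 2 for
 * merely shrunk, pass 3 for grown.
 */
#include <stdint.h>
#include <stdio.h>

struct entry { uint16_t start, end; };

static uint16_t entry_size(struct entry e)
{
	return e.end - e.start;
}

static int flush_pass(struct entry old, struct entry new)
{
	if (entry_size(new) != entry_size(old) &&
	    new.start >= old.start && new.end <= old.end)
		return 1;	/* contained in old space: safe to go first */
	if (entry_size(new) < entry_size(old))
		return 2;	/* shrunk, but overlaps freed space */
	return 3;		/* grew: goes last, after the others moved */
}

int main(void)
{
	struct entry old = { 0, 300 }, new = { 0, 200 };

	printf("pipe goes in pass %d\n", flush_pass(old, new));
	return 0;
}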

static bool skl_update_pipe_wm(struct drm_crtc *crtc,
			       struct skl_pipe_wm_parameters *params,
			       struct intel_wm_config *config,
			       struct skl_ddb_allocation *ddb, /* out */
			       struct skl_pipe_wm *pipe_wm /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	skl_compute_wm_pipe_parameters(crtc, params);
	skl_allocate_pipe_ddb(crtc, config, params, ddb);
	skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);

	if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
		return false;

	intel_crtc->wm.skl_active = *pipe_wm;
	return true;
}
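/*
 * Illustrative sketch (not part of the driver): the memcmp()-based change
 * detection used by skl_update_pipe_wm above. This only works because both
 * sides are zero-initialized before being filled, so struct padding
 * compares equal.
 */
#include <stdio.h>
#include <string.h>

struct wm_state { unsigned int level[8]; unsigned int linetime; };

static int update_if_changed(struct wm_state *active,
			     const struct wm_state *fresh)
{
	if (!memcmp(active, fresh, sizeof(*fresh)))
		return 0;	/* nothing changed, skip reprogramming */
	*active = *fresh;	/* cache the new state, report a change */
	return 1;
}

int main(void)
{
	struct wm_state active = {{0}, 0}, fresh = {{0}, 0};

	fresh.linetime = 32;
	printf("first:  %d\n", update_if_changed(&active, &fresh));
	printf("second: %d\n", update_if_changed(&active, &fresh));
	return 0;
}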

static void skl_update_other_pipe_wm(struct drm_device *dev,
				     struct drm_crtc *crtc,
				     struct intel_wm_config *config,
				     struct skl_wm_values *r)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc *this_crtc = to_intel_crtc(crtc);

	/*
	 * If the WM update hasn't changed the allocation for this_crtc (the
	 * crtc we are currently computing the new WM values for), other
	 * enabled crtcs will keep the same allocation and we don't need to
	 * recompute anything for them.
	 */
	if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
		return;

	/*
	 * Otherwise, because of this_crtc being freshly enabled/disabled, the
	 * other active pipes need new DDB allocation and WM values.
	 */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
				base.head) {
		struct skl_pipe_wm_parameters params = {};
		struct skl_pipe_wm pipe_wm = {};
		bool wm_changed;

		if (this_crtc->pipe == intel_crtc->pipe)
			continue;

		if (!intel_crtc->active)
			continue;

		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
						&params, config,
						&r->ddb, &pipe_wm);

		/*
		 * If we end up re-computing the other pipe WM values, it's
		 * because it was really needed, so we expect the WM values to
		 * be different.
		 */
		WARN_ON(!wm_changed);

		skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
		r->dirty[intel_crtc->pipe] = true;
	}
}

static void skl_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_pipe_wm_parameters params = {};
	struct skl_wm_values *results = &dev_priv->wm.skl_results;
	struct skl_pipe_wm pipe_wm = {};
	struct intel_wm_config config = {};

	memset(results, 0, sizeof(*results));

	skl_compute_wm_global_parameters(dev, &config);

	if (!skl_update_pipe_wm(crtc, &params, &config,
				&results->ddb, &pipe_wm))
		return;

	skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
	results->dirty[intel_crtc->pipe] = true;

	skl_update_other_pipe_wm(dev, crtc, &config, results);
	skl_write_wm_values(dev_priv, results);
	skl_flush_wm_values(dev_priv, results);

	/* store the new configuration */
	dev_priv->wm.skl_hw = *results;
}

static void
skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
		     uint32_t sprite_width, uint32_t sprite_height,
		     int pixel_size, bool enabled, bool scaled)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.vert_pixels = sprite_height;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	skl_update_wm(crtc);
}

static void ilk_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
Line 2797... Line 3853...
		intel_wait_for_vblank(dev, intel_plane->pipe);

	ilk_update_wm(crtc);
}

static void skl_pipe_wm_active_state(uint32_t val,
				     struct skl_pipe_wm *active,
				     bool is_transwm,
				     bool is_cursor,
				     int i,
				     int level)
{
	bool is_enabled = (val & PLANE_WM_EN) != 0;

	if (!is_transwm) {
		if (!is_cursor) {
			active->wm[level].plane_en[i] = is_enabled;
			active->wm[level].plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->wm[level].cursor_en = is_enabled;
			active->wm[level].cursor_res_b =
					val & PLANE_WM_BLOCKS_MASK;
			active->wm[level].cursor_res_l =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	} else {
		if (!is_cursor) {
			active->trans_wm.plane_en[i] = is_enabled;
			active->trans_wm.plane_res_b[i] =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.plane_res_l[i] =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		} else {
			active->trans_wm.cursor_en = is_enabled;
			active->trans_wm.cursor_res_b =
					val & PLANE_WM_BLOCKS_MASK;
			active->trans_wm.cursor_res_l =
					(val >> PLANE_WM_LINES_SHIFT) &
						PLANE_WM_LINES_MASK;
		}
	}
}

static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
	enum pipe pipe = intel_crtc->pipe;
	int level, i, max_level;
	uint32_t temp;

	max_level = ilk_wm_max_level(dev);

	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++)
			hw->plane[pipe][i][level] =
					I915_READ(PLANE_WM(pipe, i, level));
		hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++)
		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
	hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));

	if (!intel_crtc_active(crtc))
		return;

	hw->dirty[pipe] = true;

	active->linetime = hw->wm_linetime[pipe];

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = hw->plane[pipe][i][level];
			skl_pipe_wm_active_state(temp, active, false,
						false, i, level);
		}
		temp = hw->cursor[pipe][level];
		skl_pipe_wm_active_state(temp, active, false, true, i, level);
	}

	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = hw->plane_trans[pipe][i];
		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
	}

	temp = hw->cursor_trans[pipe];
	skl_pipe_wm_active_state(temp, active, true, true, i, 0);
}

void skl_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
	struct drm_crtc *crtc;

	skl_ddb_get_hw_state(dev_priv, ddb);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		skl_pipe_wm_get_hw_state(crtc);
}

static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
Line 3305... Line 4468...

	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
					dev_priv->rps.min_freq_softlimit);

	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
				& GENFREQSTATUS) == 0, 100))
		DRM_ERROR("timed out waiting for Punit\n");
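/*
 * Illustrative sketch (not part of the driver): wait_for() above is a
 * bounded poll whose second argument is the timeout in milliseconds (raised
 * from 5 to 100 in this revision). A free-standing analogue of the idiom,
 * with an invented status source:
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_status_read(void)
{
	static int countdown = 3;
	return countdown-- > 0 ? 1 : 0;	/* busy bit clears after 3 polls */
}

/* returns true on timeout, like wait_for() returning non-zero */
static bool poll_until_clear(unsigned int bit, int timeout_polls)
{
	while (timeout_polls--) {
		if ((fake_status_read() & bit) == 0)
			return false;	/* condition met */
		/* a real implementation would sleep or delay here */
	}
	return true;
}

int main(void)
{
	if (poll_until_clear(1, 100))
		printf("timed out waiting for Punit\n");
	else
		printf("Punit ready\n");
	return 0;
}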
Line 3354... Line 4517...
3354
 
4517
 
3355
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4518
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3356
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
4519
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
Line 3357... Line -...
3357
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
-
 
3358
 
4520
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3359
	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
4521
 
3360
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4522
	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
Line 3361... Line 4523...
3361
			 dev_priv->rps.cur_freq,
4523
		      "Odd GPU freq value\n"))
3362
			 vlv_gpu_freq(dev_priv, val), val);
4524
		val &= ~1;
Line 3363... Line 4525...
3363
 
4525
 
Line 3364... Line 4526...
3364
	if (val != dev_priv->rps.cur_freq)
4526
	if (val != dev_priv->rps.cur_freq)
3365
	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4527
		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3366
 
4528
 
Line 3367... Line 4529...
3367
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4529
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3368
 
-
 
3369
	dev_priv->rps.cur_freq = val;
-
 
3370
	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
-
 
3371
}
-
 
3372
 
-
 
3373
static void gen8_disable_rps_interrupts(struct drm_device *dev)
-
 
3374
{
-
 
3375
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3376
 
-
 
3377
	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-
 
3378
	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
-
 
3379
				   ~dev_priv->pm_rps_events);
-
 
3380
	/* Complete PM interrupt masking here doesn't race with the rps work
-
 
3381
	 * item again unmasking PM interrupts because that is using a different
-
 
3382
	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
-
 
3383
	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
-
 
3384
	 * gen8_enable_rps will clean up. */
-
 
3385
 
-
 
3386
	spin_lock_irq(&dev_priv->irq_lock);
-
 
3387
	dev_priv->rps.pm_iir = 0;
-
 
3388
	spin_unlock_irq(&dev_priv->irq_lock);
4530
 
3389
 
4531
	dev_priv->rps.cur_freq = val;
Line 3390... Line 4532...
3390
	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
4532
	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3391
}
-
 
3392
 
-
 
3393
static void gen6_disable_rps_interrupts(struct drm_device *dev)
-
 
3394
{
-
 
3395
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3396
 
-
 
3397
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-
 
3398
	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
-
 
3399
				~dev_priv->pm_rps_events);
-
 
3400
	/* Complete PM interrupt masking here doesn't race with the rps work
-
 
3401
	 * item again unmasking PM interrupts because that is using a different
-
 
3402
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
-
 
3403
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
4533
}
Line 3404... Line 4534...
3404
 
4534
 
3405
	spin_lock_irq(&dev_priv->irq_lock);
4535
static void gen9_disable_rps(struct drm_device *dev)
3406
	dev_priv->rps.pm_iir = 0;
4536
{
Line 3407... Line 4537...
3407
	spin_unlock_irq(&dev_priv->irq_lock);
4537
	struct drm_i915_private *dev_priv = dev->dev_private;
3408
 
4538
 
3409
	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
-
 
3410
}
-
 
3411
 
-
 
3412
static void gen6_disable_rps(struct drm_device *dev)
-
 
3413
{
-
 
3414
	struct drm_i915_private *dev_priv = dev->dev_private;
4539
	I915_WRITE(GEN6_RC_CONTROL, 0);
Line 3415... Line 4540...
3415
 
4540
}
3416
	I915_WRITE(GEN6_RC_CONTROL, 0);
4541
 
3417
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4542
static void gen6_disable_rps(struct drm_device *dev)
Line 3418... Line 4543...
3418
 
4543
{
3419
	if (IS_BROADWELL(dev))
-
 
3420
		gen8_disable_rps_interrupts(dev);
-
 
3421
	else
4544
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 3422... Line 4545...
3422
	gen6_disable_rps_interrupts(dev);
4545
 
3423
}
4546
	I915_WRITE(GEN6_RC_CONTROL, 0);
3424
 
4547
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
Line -... Line 4548...
-
 
4548
}
-
 
4549
 
-
 
4550
static void cherryview_disable_rps(struct drm_device *dev)
-
 
4551
{
3425
static void cherryview_disable_rps(struct drm_device *dev)
4552
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 3426... Line 4553...
3426
{
4553
 
3427
	struct drm_i915_private *dev_priv = dev->dev_private;
4554
	I915_WRITE(GEN6_RC_CONTROL, 0);
Line 3428... Line 4555...
3428
 
4555
}
3429
	I915_WRITE(GEN6_RC_CONTROL, 0);
4556
 
3430
 
4557
static void valleyview_disable_rps(struct drm_device *dev)
3431
	gen8_disable_rps_interrupts(dev);
4558
{
3432
}
4559
	struct drm_i915_private *dev_priv = dev->dev_private;
3433
 
4560
 
3434
static void valleyview_disable_rps(struct drm_device *dev)
4561
	/* we're doing forcewake before Disabling RC6,
3435
{
4562
	 * This what the BIOS expects when going into suspend */
-
 
4563
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3436
	struct drm_i915_private *dev_priv = dev->dev_private;
4564
 
3437
 
4565
	I915_WRITE(GEN6_RC_CONTROL, 0);
3438
	I915_WRITE(GEN6_RC_CONTROL, 0);
4566
 
3439
 
4567
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
-
 
4568
}
-
 
4569
 
-
 
4570
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
-
 
4571
{
3440
	gen6_disable_rps_interrupts(dev);
4572
	if (IS_VALLEYVIEW(dev)) {
Line 3441... Line 4573...
3441
}
4573
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
3442
 
4574
			mode = GEN6_RC_CTL_RC6_ENABLE;
3443
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4575
		else
Line 3466... Line 4598...
3466
 
4598
 
3467
	/* Respect the kernel parameter if it is set */
4599
	/* Respect the kernel parameter if it is set */
3468
	if (enable_rc6 >= 0) {
4600
	if (enable_rc6 >= 0) {
Line 3469... Line 4601...
3469
		int mask;
4601
		int mask;
3470
 
4602
 
3471
		if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
4603
		if (HAS_RC6p(dev))
3472
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
4604
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
3473
			       INTEL_RC6pp_ENABLE;
4605
			       INTEL_RC6pp_ENABLE;
Line 3494... Line 4626...
3494
int intel_enable_rc6(const struct drm_device *dev)
4626
int intel_enable_rc6(const struct drm_device *dev)
3495
{
4627
{
3496
	return i915.enable_rc6;
4628
	return i915.enable_rc6;
3497
}
4629
}
Line 3498... Line 4630...
3498
 
4630
 
3499
static void gen8_enable_rps_interrupts(struct drm_device *dev)
4631
static void gen6_init_rps_frequencies(struct drm_device *dev)
3500
{
4632
{
-
 
4633
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
4634
	uint32_t rp_state_cap;
-
 
4635
	u32 ddcc_status = 0;
Line 3501... Line -...
3501
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3502
 
-
 
3503
	spin_lock_irq(&dev_priv->irq_lock);
4636
	int ret;
3504
	WARN_ON(dev_priv->rps.pm_iir);
-
 
3505
	gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-
 
3506
	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-
 
3507
	spin_unlock_irq(&dev_priv->irq_lock);
-
 
3508
}
-
 
3509
 
-
 
3510
static void gen6_enable_rps_interrupts(struct drm_device *dev)
-
 
3511
{
-
 
3512
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
3513
 
-
 
3514
	spin_lock_irq(&dev_priv->irq_lock);
-
 
3515
	WARN_ON(dev_priv->rps.pm_iir);
-
 
3516
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-
 
3517
	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
-
 
3518
	spin_unlock_irq(&dev_priv->irq_lock);
-
 
3519
}
-
 
3520
 
-
 
3521
static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
4637
 
3522
{
4638
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3523
	/* All of these values are in units of 50MHz */
4639
	/* All of these values are in units of 50MHz */
3524
	dev_priv->rps.cur_freq		= 0;
-
 
3525
	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
4640
	dev_priv->rps.cur_freq		= 0;
-
 
4641
	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
3526
	dev_priv->rps.rp1_freq		= (rp_state_cap >>  8) & 0xff;
4642
	dev_priv->rps.rp0_freq		= (rp_state_cap >>  0) & 0xff;
3527
	dev_priv->rps.rp0_freq		= (rp_state_cap >>  0) & 0xff;
-
 
3528
	dev_priv->rps.min_freq		= (rp_state_cap >> 16) & 0xff;
-
 
3529
	/* XXX: only BYT has a special efficient freq */
4643
	dev_priv->rps.rp1_freq		= (rp_state_cap >>  8) & 0xff;
3530
	dev_priv->rps.efficient_freq	= dev_priv->rps.rp1_freq;
4644
	dev_priv->rps.min_freq		= (rp_state_cap >> 16) & 0xff;
Line -... Line 4645...
-
 
4645
	/* hw_max = RP0 until we check for overclocking */
-
 
4646
	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;
-
 
4647
 
-
 
4648
	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-
 
4649
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
-
 
4650
		ret = sandybridge_pcode_read(dev_priv,
-
 
4651
					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
-
 
4652
					&ddcc_status);
-
 
4653
		if (0 == ret)
-
 
4654
			dev_priv->rps.efficient_freq =
3531
	/* hw_max = RP0 until we check for overclocking */
4655
				(ddcc_status >> 8) & 0xff;
3532
	dev_priv->rps.max_freq		= dev_priv->rps.rp0_freq;
4656
	}
3533
 
4657
 
Line 3534... Line 4658...
3534
	/* Preserve min/max settings in case of re-init */
4658
	/* Preserve min/max settings in case of re-init */
-
 
4659
	if (dev_priv->rps.max_freq_softlimit == 0)
-
 
4660
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
-
 
4661
 
-
 
4662
	if (dev_priv->rps.min_freq_softlimit == 0) {
-
 
4663
		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3535
	if (dev_priv->rps.max_freq_softlimit == 0)
4664
			dev_priv->rps.min_freq_softlimit =
-
 
4665
				/* max(RPe, 450 MHz) */
-
 
4666
				max(dev_priv->rps.efficient_freq, (u8) 9);
-
 
4667
		else
-
 
4668
			dev_priv->rps.min_freq_softlimit =
-
 
4669
				dev_priv->rps.min_freq;
-
 
4670
	}
-
 
4671
}
-
 
4672
 
-
 
4673
static void gen9_enable_rps(struct drm_device *dev)
-
 
4674
{
-
 
4675
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
4676
	struct intel_engine_cs *ring;
-
 
4677
	uint32_t rc6_mask = 0;
-
 
4678
	int unused;
-
 
4679
 
-
 
4680
	/* 1a: Software RC state - RC0 */
-
 
4681
	I915_WRITE(GEN6_RC_STATE, 0);
-
 
4682
 
-
 
4683
	/* 1b: Get forcewake during program sequence. Although the driver
-
 
4684
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
-
 
4685
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
-
 
4686
 
-
 
4687
	/* 2a: Disable RC states. */
-
 
4688
	I915_WRITE(GEN6_RC_CONTROL, 0);
-
 
4689
 
-
 
4690
	/* 2b: Program RC6 thresholds.*/
-
 
4691
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
-
 
4692
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
-
 
4693
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-
 
4694
	for_each_ring(ring, dev_priv, unused)
-
 
4695
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
-
 
4696
	I915_WRITE(GEN6_RC_SLEEP, 0);
-
 
4697
	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
-
 
4698
 
-
 
4699
	/* 3a: Enable RC6 */
-
 
4700
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
-
 
4701
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
-
 
4702
	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
-
 
4703
			"on" : "off");
-
 
4704
	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-
 
4705
				   GEN6_RC_CTL_EI_MODE(1) |
3536
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4706
				   rc6_mask);
Line 3537... Line 4707...
3537
 
4707
 
3538
	if (dev_priv->rps.min_freq_softlimit == 0)
4708
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3539
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
4709
 
3540
}
4710
}
3541
 
4711
 
3542
static void gen8_enable_rps(struct drm_device *dev)
4712
static void gen8_enable_rps(struct drm_device *dev)
Line 3543... Line 4713...
3543
{
4713
{
3544
	struct drm_i915_private *dev_priv = dev->dev_private;
4714
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 3554... Line 4724...
3554
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4724
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
Line 3555... Line 4725...
3555
 
4725
 
3556
	/* 2a: Disable RC states. */
4726
	/* 2a: Disable RC states. */
Line 3557... Line 4727...
3557
	I915_WRITE(GEN6_RC_CONTROL, 0);
4727
	I915_WRITE(GEN6_RC_CONTROL, 0);
3558
 
4728
 
Line 3559... Line 4729...
3559
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4729
	/* Initialize rps frequencies */
3560
	parse_rp_state_cap(dev_priv, rp_state_cap);
4730
	gen6_init_rps_frequencies(dev);
3561
 
4731
 
3562
	/* 2b: Program RC6 thresholds.*/
4732
	/* 2b: Program RC6 thresholds.*/
Line 3613... Line 4783...
3613
		   GEN6_RP_UP_BUSY_AVG |
4783
		   GEN6_RP_UP_BUSY_AVG |
3614
		   GEN6_RP_DOWN_IDLE_AVG);
4784
		   GEN6_RP_DOWN_IDLE_AVG);
Line 3615... Line 4785...
3615
 
4785
 
Line 3616... Line 4786...
3616
	/* 6: Ring frequency + overclocking (our driver does this later */
4786
	/* 6: Ring frequency + overclocking (our driver does this later */
3617
 
-
 
3618
	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
4787
 
Line 3619... Line 4788...
3619
 
4788
	dev_priv->rps.power = HIGH_POWER; /* force a reset */
3620
	gen8_enable_rps_interrupts(dev);
4789
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
Line 3621... Line 4790...
3621
 
4790
 
3622
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4791
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3623
}
4792
}
3624
 
4793
 
3625
static void gen6_enable_rps(struct drm_device *dev)
-
 
3626
{
-
 
3627
	struct drm_i915_private *dev_priv = dev->dev_private;
4794
static void gen6_enable_rps(struct drm_device *dev)
3628
	struct intel_engine_cs *ring;
4795
{
3629
	u32 rp_state_cap;
4796
	struct drm_i915_private *dev_priv = dev->dev_private;
3630
	u32 gt_perf_status;
4797
	struct intel_engine_cs *ring;
Line 3649... Line 4816...
3649
		I915_WRITE(GTFIFODBG, gtfifodbg);
4816
		I915_WRITE(GTFIFODBG, gtfifodbg);
3650
	}
4817
	}
Line 3651... Line 4818...
3651
 
4818
 
Line 3652... Line 4819...
3652
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4819
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3653
 
-
 
3654
	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-
 
3655
	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
4820
 
Line 3656... Line 4821...
3656
 
4821
	/* Initialize rps frequencies */
3657
	parse_rp_state_cap(dev_priv, rp_state_cap);
4822
	gen6_init_rps_frequencies(dev);
Line 3658... Line 4823...
3658
 
4823
 
Line 3715... Line 4880...
3715
	}
4880
	}
Line 3716... Line 4881...
3716
 
4881
 
3717
	dev_priv->rps.power = HIGH_POWER; /* force a reset */
4882
	dev_priv->rps.power = HIGH_POWER; /* force a reset */
Line 3718... Line -...
3718
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
-
 
3719
 
-
 
3720
	gen6_enable_rps_interrupts(dev);
4883
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3721
 
4884
 
3722
	rc6vids = 0;
4885
	rc6vids = 0;
3723
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
4886
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3724
	if (IS_GEN6(dev) && ret) {
4887
	if (IS_GEN6(dev) && ret) {
Line 3764... Line 4927...
3764
	/*
4927
	/*
3765
	 * For each potential GPU frequency, load a ring frequency we'd like
4928
	 * For each potential GPU frequency, load a ring frequency we'd like
3766
	 * to use for memory access.  We do this by specifying the IA frequency
4929
	 * to use for memory access.  We do this by specifying the IA frequency
3767
	 * the PCU should use as a reference to determine the ring frequency.
4930
	 * the PCU should use as a reference to determine the ring frequency.
3768
	 */
4931
	 */
3769
	for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
4932
	for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
3770
	     gpu_freq--) {
4933
	     gpu_freq--) {
3771
		int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
4934
		int diff = dev_priv->rps.max_freq - gpu_freq;
3772
		unsigned int ia_freq = 0, ring_freq = 0;
4935
		unsigned int ia_freq = 0, ring_freq = 0;
Line 3773... Line 4936...
3773
 
4936
 
3774
		if (INTEL_INFO(dev)->gen >= 8) {
4937
		if (INTEL_INFO(dev)->gen >= 8) {
3775
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
4938
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
Line 3921... Line 5084...
3921
 
5084
 
Line 3922... Line 5085...
3922
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5085
	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3923
 
5086
 
-
 
5087
	pcbr = I915_READ(VLV_PCBR);
3924
	pcbr = I915_READ(VLV_PCBR);
5088
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
3925
	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5089
		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
Line 3926... Line 5090...
3926
		paddr = (dev_priv->mm.stolen_base +
5090
		paddr = (dev_priv->mm.stolen_base +
3927
			 (gtt->stolen_size - pctx_size));
5091
			 (gtt->stolen_size - pctx_size));
3928
 
5092
 
-
 
5093
		pctx_paddr = (paddr & (~4095));
-
 
5094
		I915_WRITE(VLV_PCBR, pctx_paddr);
3929
		pctx_paddr = (paddr & (~4095));
5095
	}
Line 3930... Line 5096...
3930
		I915_WRITE(VLV_PCBR, pctx_paddr);
5096
 
3931
	}
5097
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
3932
}
5098
}
Line 3952... Line 5118...
3952
								      I915_GTT_OFFSET_NONE,
5118
								      I915_GTT_OFFSET_NONE,
3953
								      pctx_size);
5119
								      pctx_size);
3954
		goto out;
5120
		goto out;
3955
	}
5121
	}
Line -... Line 5122...
-
 
5122
 
-
 
5123
	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
3956
 
5124
 
3957
	/*
5125
	/*
3958
	 * From the Gunit register HAS:
5126
	 * From the Gunit register HAS:
3959
	 * The Gfx driver is expected to program this register and ensure
5127
	 * The Gfx driver is expected to program this register and ensure
3960
	 * proper allocation within Gfx stolen memory.  For example, this
5128
	 * proper allocation within Gfx stolen memory.  For example, this
Line 3970... Line 5138...
3970
 
5138
 
3971
	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5139
	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
Line 3972... Line 5140...
3972
	I915_WRITE(VLV_PCBR, pctx_paddr);
5140
	I915_WRITE(VLV_PCBR, pctx_paddr);
-
 
5141
 
3973
 
5142
out:
3974
out:
5143
	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
Line 3975... Line 5144...
3975
	dev_priv->vlv_pctx = pctx;
5144
	dev_priv->vlv_pctx = pctx;
3976
}
5145
}
Line 3987... Line 5156...
3987
}
5156
}
Line 3988... Line 5157...
3988
 
5157
 
3989
static void valleyview_init_gt_powersave(struct drm_device *dev)
5158
static void valleyview_init_gt_powersave(struct drm_device *dev)
3990
{
5159
{
-
 
5160
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 3991... Line 5161...
3991
	struct drm_i915_private *dev_priv = dev->dev_private;
5161
	u32 val;
Line 3992... Line 5162...
3992
 
5162
 
Line -... Line 5163...
-
 
5163
	valleyview_setup_pctx(dev);
-
 
5164
 
-
 
5165
	mutex_lock(&dev_priv->rps.hw_lock);
-
 
5166
 
-
 
5167
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
 
5168
	switch ((val >> 6) & 3) {
-
 
5169
	case 0:
-
 
5170
	case 1:
-
 
5171
		dev_priv->mem_freq = 800;
-
 
5172
		break;
-
 
5173
	case 2:
-
 
5174
		dev_priv->mem_freq = 1066;
-
 
5175
		break;
-
 
5176
	case 3:
-
 
5177
		dev_priv->mem_freq = 1333;
3993
	valleyview_setup_pctx(dev);
5178
		break;
3994
 
5179
	}
3995
	mutex_lock(&dev_priv->rps.hw_lock);
5180
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
3996
 
5181
 
3997
	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5182
	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
Line 4026... Line 5211...
4026
}
5211
}
Line 4027... Line 5212...
4027
 
5212
 
4028
static void cherryview_init_gt_powersave(struct drm_device *dev)
5213
static void cherryview_init_gt_powersave(struct drm_device *dev)
4029
{
5214
{
-
 
5215
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 4030... Line 5216...
4030
	struct drm_i915_private *dev_priv = dev->dev_private;
5216
	u32 val;
Line 4031... Line 5217...
4031
 
5217
 
Line -... Line 5218...
-
 
5218
	cherryview_setup_pctx(dev);
-
 
5219
 
-
 
5220
	mutex_lock(&dev_priv->rps.hw_lock);
-
 
5221
 
-
 
5222
	mutex_lock(&dev_priv->dpio_lock);
-
 
5223
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
-
 
5224
	mutex_unlock(&dev_priv->dpio_lock);
-
 
5225
 
-
 
5226
	switch ((val >> 2) & 0x7) {
-
 
5227
	case 0:
-
 
5228
	case 1:
-
 
5229
		dev_priv->rps.cz_freq = 200;
-
 
5230
		dev_priv->mem_freq = 1600;
-
 
5231
		break;
-
 
5232
	case 2:
-
 
5233
		dev_priv->rps.cz_freq = 267;
-
 
5234
		dev_priv->mem_freq = 1600;
-
 
5235
		break;
-
 
5236
	case 3:
-
 
5237
		dev_priv->rps.cz_freq = 333;
-
 
5238
		dev_priv->mem_freq = 2000;
-
 
5239
		break;
-
 
5240
	case 4:
-
 
5241
		dev_priv->rps.cz_freq = 320;
-
 
5242
		dev_priv->mem_freq = 1600;
-
 
5243
		break;
-
 
5244
	case 5:
-
 
5245
		dev_priv->rps.cz_freq = 400;
-
 
5246
		dev_priv->mem_freq = 1600;
4032
	cherryview_setup_pctx(dev);
5247
		break;
4033
 
5248
	}
4034
	mutex_lock(&dev_priv->rps.hw_lock);
5249
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
4035
 
5250
 
4036
	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5251
	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
Line 4052... Line 5267...
4052
	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
5267
	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
4053
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5268
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
4054
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5269
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4055
			 dev_priv->rps.min_freq);
5270
			 dev_priv->rps.min_freq);
Line -... Line 5271...
-
 
5271
 
-
 
5272
	WARN_ONCE((dev_priv->rps.max_freq |
-
 
5273
		   dev_priv->rps.efficient_freq |
-
 
5274
		   dev_priv->rps.rp1_freq |
-
 
5275
		   dev_priv->rps.min_freq) & 1,
-
 
5276
		  "Odd GPU freq values\n");
4056
 
5277
 
4057
	/* Preserve min/max settings in case of re-init */
5278
	/* Preserve min/max settings in case of re-init */
4058
	if (dev_priv->rps.max_freq_softlimit == 0)
5279
	if (dev_priv->rps.max_freq_softlimit == 0)
Line 4059... Line 5280...
4059
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5280
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
Line 4109... Line 5330...
4109
				      VLV_RENDER_RC6_COUNT_EN));
5330
				      VLV_RENDER_RC6_COUNT_EN));
Line 4110... Line 5331...
4110
 
5331
 
4111
	/* For now we assume BIOS is allocating and populating the PCBR  */
5332
	/* For now we assume BIOS is allocating and populating the PCBR  */
Line 4112... Line -...
4112
	pcbr = I915_READ(VLV_PCBR);
-
 
4113
 
-
 
4114
	DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
5333
	pcbr = I915_READ(VLV_PCBR);
4115
 
5334
 
4116
	/* 3: Enable RC6 */
5335
	/* 3: Enable RC6 */
4117
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
5336
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
Line 4140... Line 5359...
4140
		   GEN6_RP_UP_BUSY_AVG |
5359
		   GEN6_RP_UP_BUSY_AVG |
4141
		   GEN6_RP_DOWN_IDLE_AVG);
5360
		   GEN6_RP_DOWN_IDLE_AVG);
Line 4142... Line 5361...
4142
 
5361
 
Line -... Line 5362...
-
 
5362
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
 
5363
 
-
 
5364
	/* RPS code assumes GPLL is used */
4143
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5365
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
4144
 
5366
 
Line 4145... Line 5367...
4145
	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
5367
	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
4146
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5368
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4147
 
5369
 
Line 4154... Line 5376...
4154
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5376
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4155
			 dev_priv->rps.efficient_freq);
5377
			 dev_priv->rps.efficient_freq);
Line 4156... Line 5378...
4156
 
5378
 
Line 4157... Line -...
4157
	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
-
 
4158
 
-
 
4159
	gen8_enable_rps_interrupts(dev);
5379
	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4160
 
5380
 
Line 4161... Line 5381...
4161
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
5381
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4162
}
5382
}
Line 4220... Line 5440...
4220
 
5440
 
Line 4221... Line 5441...
4221
	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5441
	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
Line -... Line 5442...
-
 
5442
 
-
 
5443
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-
 
5444
 
4222
 
5445
	/* RPS code assumes GPLL is used */
4223
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5446
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
Line 4224... Line 5447...
4224
 
5447
 
4225
	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
5448
	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
4226
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5449
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
Line 4234... Line 5457...
4234
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5457
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4235
			 dev_priv->rps.efficient_freq);
5458
			 dev_priv->rps.efficient_freq);
Line 4236... Line 5459...
4236
 
5459
 
Line 4237... Line -...
4237
	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
-
 
4238
 
-
 
4239
	gen6_enable_rps_interrupts(dev);
5460
	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4240
 
5461
 
Line 4241... Line 5462...
4241
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
5462
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4242
}
5463
}
Line 4982... Line 6203...
4982
		return;
6203
		return;
4983
	else if (IS_VALLEYVIEW(dev))
6204
	else if (IS_VALLEYVIEW(dev))
4984
		valleyview_cleanup_gt_powersave(dev);
6205
		valleyview_cleanup_gt_powersave(dev);
4985
}
6206
}
Line -... Line 6207...
-
 
6207
 
-
 
6208
static void gen6_suspend_rps(struct drm_device *dev)
-
 
6209
{
-
 
6210
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
6211
 
-
 
6212
//   flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
 
6213
 
-
 
6214
	/*
-
 
6215
	 * TODO: disable RPS interrupts on GEN9+ too once RPS support
-
 
6216
	 * is added for it.
-
 
6217
	 */
-
 
6218
	if (INTEL_INFO(dev)->gen < 9)
-
 
6219
		gen6_disable_rps_interrupts(dev);
-
 
6220
}
4986
 
6221
 
4987
/**
6222
/**
4988
 * intel_suspend_gt_powersave - suspend PM work and helper threads
6223
 * intel_suspend_gt_powersave - suspend PM work and helper threads
4989
 * @dev: drm device
6224
 * @dev: drm device
4990
 *
6225
 *
Line 4994... Line 6229...
4994
 */
6229
 */
4995
void intel_suspend_gt_powersave(struct drm_device *dev)
6230
void intel_suspend_gt_powersave(struct drm_device *dev)
4996
{
6231
{
4997
	struct drm_i915_private *dev_priv = dev->dev_private;
6232
	struct drm_i915_private *dev_priv = dev->dev_private;
Line 4998... Line -...
4998
 
-
 
4999
	/* Interrupts should be disabled already to avoid re-arming. */
6233
 
5000
	WARN_ON(intel_irqs_enabled(dev_priv));
6234
	if (INTEL_INFO(dev)->gen < 6)
5001
 
-
 
Line 5002... Line 6235...
5002
//	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6235
		return;
Line 5003... Line 6236...
5003
 
6236
 
5004
	cancel_work_sync(&dev_priv->rps.work);
6237
	gen6_suspend_rps(dev);
5005
 
6238
 
Line 5006... Line 6239...
5006
	/* Force GPU to min freq during suspend */
6239
	/* Force GPU to min freq during suspend */
5007
	gen6_rps_idle(dev_priv);
6240
	gen6_rps_idle(dev_priv);
5008
}
6241
}
Line 5009... Line -...
5009
 
-
 
5010
void intel_disable_gt_powersave(struct drm_device *dev)
-
 
5011
{
-
 
5012
	struct drm_i915_private *dev_priv = dev->dev_private;
6242
 
5013
 
6243
void intel_disable_gt_powersave(struct drm_device *dev)
5014
	/* Interrupts should be disabled already to avoid re-arming. */
6244
{
5015
	WARN_ON(intel_irqs_enabled(dev_priv));
6245
	struct drm_i915_private *dev_priv = dev->dev_private;
5016
 
6246
 
Line 5017... Line 6247...
5017
	if (IS_IRONLAKE_M(dev)) {
6247
	if (IS_IRONLAKE_M(dev)) {
-
 
6248
		ironlake_disable_drps(dev);
-
 
6249
		ironlake_disable_rc6(dev);
5018
		ironlake_disable_drps(dev);
6250
	} else if (INTEL_INFO(dev)->gen >= 6) {
5019
		ironlake_disable_rc6(dev);
6251
		intel_suspend_gt_powersave(dev);
5020
	} else if (INTEL_INFO(dev)->gen >= 6) {
6252
 
5021
		intel_suspend_gt_powersave(dev);
6253
		mutex_lock(&dev_priv->rps.hw_lock);
5022
 
6254
		if (INTEL_INFO(dev)->gen >= 9)
5023
		mutex_lock(&dev_priv->rps.hw_lock);
6255
			gen9_disable_rps(dev);
-
 
6256
		else if (IS_CHERRYVIEW(dev))
5024
		if (IS_CHERRYVIEW(dev))
6257
			cherryview_disable_rps(dev);
5025
			cherryview_disable_rps(dev);
6258
		else if (IS_VALLEYVIEW(dev))
5026
		else if (IS_VALLEYVIEW(dev))
6259
			valleyview_disable_rps(dev);
5027
			valleyview_disable_rps(dev);
6260
		else
Line 5039... Line 6272...
5039
			     rps.delayed_resume_work.work);
6272
			     rps.delayed_resume_work.work);
5040
	struct drm_device *dev = dev_priv->dev;
6273
	struct drm_device *dev = dev_priv->dev;
Line 5041... Line 6274...
5041
 
6274
 
Line -... Line 6275...
-
 
6275
	mutex_lock(&dev_priv->rps.hw_lock);
-
 
6276
 
-
 
6277
	/*
-
 
6278
	 * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
-
 
6279
	 * added for it.
-
 
6280
	 */
-
 
6281
	if (INTEL_INFO(dev)->gen < 9)
5042
	mutex_lock(&dev_priv->rps.hw_lock);
6282
		gen6_reset_rps_interrupts(dev);
5043
 
6283
 
5044
	if (IS_CHERRYVIEW(dev)) {
6284
	if (IS_CHERRYVIEW(dev)) {
5045
		cherryview_enable_rps(dev);
6285
		cherryview_enable_rps(dev);
-
 
6286
	} else if (IS_VALLEYVIEW(dev)) {
-
 
6287
		valleyview_enable_rps(dev);
5046
	} else if (IS_VALLEYVIEW(dev)) {
6288
	} else if (INTEL_INFO(dev)->gen >= 9) {
5047
		valleyview_enable_rps(dev);
6289
		gen9_enable_rps(dev);
5048
	} else if (IS_BROADWELL(dev)) {
6290
	} else if (IS_BROADWELL(dev)) {
5049
		gen8_enable_rps(dev);
6291
		gen8_enable_rps(dev);
5050
		__gen6_update_ring_freq(dev);
6292
		__gen6_update_ring_freq(dev);
5051
	} else {
6293
	} else {
5052
	gen6_enable_rps(dev);
6294
	gen6_enable_rps(dev);
5053
		__gen6_update_ring_freq(dev);
6295
		__gen6_update_ring_freq(dev);
-
 
6296
	}
-
 
6297
	dev_priv->rps.enabled = true;
-
 
6298
 
-
 
6299
	if (INTEL_INFO(dev)->gen < 9)
5054
	}
6300
		gen6_enable_rps_interrupts(dev);
Line 5055... Line 6301...
5055
	dev_priv->rps.enabled = true;
6301
 
5056
	mutex_unlock(&dev_priv->rps.hw_lock);
6302
	mutex_unlock(&dev_priv->rps.hw_lock);
Line 5089... Line 6335...
5089
 
6335
 
5090
void intel_reset_gt_powersave(struct drm_device *dev)
6336
void intel_reset_gt_powersave(struct drm_device *dev)
5091
{
6337
{
Line -... Line 6338...
-
 
6338
	struct drm_i915_private *dev_priv = dev->dev_private;
-
 
6339
 
-
 
6340
	if (INTEL_INFO(dev)->gen < 6)
-
 
6341
		return;
5092
	struct drm_i915_private *dev_priv = dev->dev_private;
6342
 
5093
 
-
 
5094
	dev_priv->rps.enabled = false;
6343
	gen6_suspend_rps(dev);
Line 5095... Line 6344...
5095
	intel_enable_gt_powersave(dev);
6344
	dev_priv->rps.enabled = false;
5096
}
6345
}
5097
 
6346
 
Line 5110... Line 6359...
5110
static void g4x_disable_trickle_feed(struct drm_device *dev)
6359
static void g4x_disable_trickle_feed(struct drm_device *dev)
5111
{
6360
{
5112
	struct drm_i915_private *dev_priv = dev->dev_private;
6361
	struct drm_i915_private *dev_priv = dev->dev_private;
5113
	int pipe;
6362
	int pipe;
Line 5114... Line 6363...
5114
 
6363
 
5115
	for_each_pipe(pipe) {
6364
	for_each_pipe(dev_priv, pipe) {
5116
		I915_WRITE(DSPCNTR(pipe),
6365
		I915_WRITE(DSPCNTR(pipe),
5117
			   I915_READ(DSPCNTR(pipe)) |
6366
			   I915_READ(DSPCNTR(pipe)) |
5118
			   DISPPLANE_TRICKLE_FEED_DISABLE);
6367
			   DISPPLANE_TRICKLE_FEED_DISABLE);
5119
		intel_flush_primary_plane(dev_priv, pipe);
6368
		intel_flush_primary_plane(dev_priv, pipe);
Line 5225... Line 6474...
5225
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
6474
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
5226
		   DPLS_EDP_PPS_FIX_DIS);
6475
		   DPLS_EDP_PPS_FIX_DIS);
5227
	/* The below fixes the weird display corruption, a few pixels shifted
6476
	/* The below fixes the weird display corruption, a few pixels shifted
5228
	 * downward, on (only) LVDS of some HP laptops with IVY.
6477
	 * downward, on (only) LVDS of some HP laptops with IVY.
5229
	 */
6478
	 */
5230
	for_each_pipe(pipe) {
6479
	for_each_pipe(dev_priv, pipe) {
5231
		val = I915_READ(TRANS_CHICKEN2(pipe));
6480
		val = I915_READ(TRANS_CHICKEN2(pipe));
5232
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
6481
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
5233
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6482
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5234
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
6483
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
5235
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
6484
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
Line 5237... Line 6486...
5237
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
6486
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
5238
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
6487
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
5239
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
6488
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
5240
	}
6489
	}
5241
	/* WADP0ClockGatingDisable */
6490
	/* WADP0ClockGatingDisable */
5242
	for_each_pipe(pipe) {
6491
	for_each_pipe(dev_priv, pipe) {
5243
		I915_WRITE(TRANS_CHICKEN1(pipe),
6492
		I915_WRITE(TRANS_CHICKEN1(pipe),
5244
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
6493
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5245
	}
6494
	}
5246
}
6495
}
Line 5269... Line 6518...
5269
 
6518
 
5270
	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
6519
	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
5271
	I915_WRITE(_3D_CHICKEN,
6520
	I915_WRITE(_3D_CHICKEN,
Line 5272... Line -...
5272
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
-
 
5273
 
-
 
5274
	/* WaSetupGtModeTdRowDispatch:snb */
-
 
5275
	if (IS_SNB_GT1(dev))
-
 
5276
		I915_WRITE(GEN6_GT_MODE,
-
 
5277
			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
6521
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
5278
 
6522
 
Line 5279... Line 6523...
5279
	/* WaDisable_RenderCache_OperationalFlush:snb */
6523
	/* WaDisable_RenderCache_OperationalFlush:snb */
5280
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
6524
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
Line 5286... Line 6530...
5286
	 * Note that PS/WM thread counts depend on the WIZ hashing
6530
	 * Note that PS/WM thread counts depend on the WIZ hashing
5287
	 * disable bit, which we don't touch here, but it's good
6531
	 * disable bit, which we don't touch here, but it's good
5288
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
6532
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5289
	 */
6533
	 */
5290
	I915_WRITE(GEN6_GT_MODE,
6534
	I915_WRITE(GEN6_GT_MODE,
5291
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
6535
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
Line 5292... Line 6536...
5292
 
6536
 
Line 5293... Line 6537...
5293
	ilk_init_lp_watermarks(dev);
6537
	ilk_init_lp_watermarks(dev);
5294
 
6538
 
Line 5405... Line 6649...
5405
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6649
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5406
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6650
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
5407
	}
6651
	}
5408
}
6652
}
static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* FIXME(BDW): Check all the w/a, some might only apply to
	 * pre-production hw. */

	/* WaDisablePartialInstShootdown:bdw */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));

	/* WaDisableThreadStallDopClockGating:bdw */
	/* FIXME: Unclear whether we really need this on production bdw. */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));

	/*
	 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
	 * pre-production hardware
	 */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));

	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));

	I915_WRITE(COMMON_SLICE_CHICKEN2,
		   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));

	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));

	/* WaDisableDopClockGating:bdw May not be needed for production */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	I915_WRITE(HDC_CHICKEN0,
		   I915_READ(HDC_CHICKEN0) |
		   _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
Line 5540... Line 6728...

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
Line 5637... Line 6825...

	/*
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;

Line 5653... Line 6841...
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	mutex_unlock(&dev_priv->rps.hw_lock);

	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
Line 5746... Line 6916...

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->rps.hw_lock);

	switch ((val >> 2) & 0x7) {
	case 0:
	case 1:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
		dev_priv->mem_freq = 1600;
		break;
	case 2:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
		dev_priv->mem_freq = 1600;
		break;
	case 3:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
		dev_priv->mem_freq = 2000;
		break;
	case 4:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
		dev_priv->mem_freq = 1600;
		break;
	case 5:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/* WaDisablePartialInstShootdown:chv */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));

	/* WaDisableThreadStallDopClockGating:chv */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));

	/* WaVSRefCountFullforceMissDisable:chv */

Line 5805... Line 6938...

		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));

	/* WaDisableGunitClockGating:chv (pre-production hw) */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
		   GINT_DIS);

	/* WaDisableFfDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));

	/* WaDisableDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_init_clock_gating(struct drm_device *dev)
Line 5905... Line 7020...

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
Line 5938... Line 7063...

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		if ((power_well)->domains & (domain_mask))

/**
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
					  enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = intel_display_power_enabled_unlocked(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
//   vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
    outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
//   vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
		}

		hsw_power_well_post_enable(dev_priv);
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}
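vlv_set_power_well() leans on the driver's wait_for() helper to poll the
Punit status with a timeout. A minimal sketch of that poll-with-timeout
pattern is below; it assumes the Linux jiffies helpers, and the real macro
in i915_drv.h additionally sleeps between polls instead of spinning.

/*
 * Sketch only: poll COND until it holds or timeout_ms elapses.
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
#define sketch_wait_for(COND, timeout_ms) ({				\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(timeout_ms); \
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		cpu_relax();						\
	}								\
	ret__;								\
})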

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv->dev);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection.
	 */
	I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
		   DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void check_power_well_state(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	bool enabled = power_well->ops->is_enabled(dev_priv, power_well);

	if (power_well->always_on || !i915.disable_power_well) {
		if (!enabled)
			goto mismatch;

		return;
	}

	if (enabled != (power_well->count > 0))
		goto mismatch;

	return;

mismatch:
	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
	     power_well->name, power_well->always_on, enabled,
	     power_well->count, i915.disable_power_well);
}

void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
			power_well->hw_enabled = true;
		}

		check_power_well_state(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}

void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->hw_enabled = false;
			power_well->ops->disable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
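Callers are expected to bracket any access to registers behind a power well
with a get/put pair so the well stays up for the duration. A hedged usage
sketch follows; the domain and register are illustrative placeholders, not
taken from this file.

/*
 * Hypothetical helper showing the get/put discipline around an MMIO read;
 * POWER_DOMAIN_PIPE_B is just an example domain.
 */
static u32 sketch_read_with_power(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_B);
	val = I915_READ(reg);	/* the well is guaranteed enabled here */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_B);

	return val;
}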

static struct i915_power_domains *hsw_pwr;

/* Display audio driver power well request */
int i915_request_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);

/* Display audio driver power well release */
int i915_release_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
	return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);

/*
 * Private interface for the audio driver to get CDCLK in kHz.
 *
 * Caller must request power well using i915_request_power_well() prior to
 * making the call.
 */
int i915_get_cdclk_freq(void)
{
	struct drm_i915_private *dev_priv;

	if (!hsw_pwr)
		return -ENODEV;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);

	return intel_ddi_get_cdclk_freq(dev_priv);
}
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))
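Each well advertises the set of domains it feeds as a bitmask, and the
for_each_power_well() iterators above simply test BIT(domain) against that
mask. A standalone sketch of the matching rule, with illustrative names:

/*
 * Sketch: a well covers a request for 'domain' iff the corresponding bit
 * is present in its domain mask, mirroring the iterator's filter clause.
 */
static inline bool sketch_well_covers(unsigned long well_domains, int domain)
{
	return (well_domains & BIT(domain)) != 0;
}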

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 enum punit_power_well power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}

void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
	hsw_pwr = NULL;
}

static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* nothing to do if common lane is already off */
	if (!cmn->ops->is_enabled(dev_priv, cmn))
		return;

	/* If the display might be already active skip this */
	if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}

void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}

void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

//	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}

void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
//	pm_runtime_get_noresume(device);
}

void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;
}

void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}
}

void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;
}

static void intel_init_fbc(struct drm_i915_private *dev_priv)
{
	if (!HAS_FBC(dev_priv)) {
		dev_priv->fbc.enabled = false;
		return;
	}

	if (INTEL_INFO(dev_priv)->gen >= 7) {
		dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
		dev_priv->display.enable_fbc = gen7_enable_fbc;
		dev_priv->display.disable_fbc = ironlake_disable_fbc;
	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
		dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
		dev_priv->display.enable_fbc = ironlake_enable_fbc;
		dev_priv->display.disable_fbc = ironlake_disable_fbc;
	} else if (IS_GM45(dev_priv)) {
		dev_priv->display.fbc_enabled = g4x_fbc_enabled;
		dev_priv->display.enable_fbc = g4x_enable_fbc;
		dev_priv->display.disable_fbc = g4x_disable_fbc;
	} else {
		dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
		dev_priv->display.enable_fbc = i8xx_enable_fbc;
		dev_priv->display.disable_fbc = i8xx_disable_fbc;

		/* This value was pulled out of someone's hat */
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
	}

	dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_init_fbc(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);

		dev_priv->display.init_clock_gating = gen9_init_clock_gating;
		dev_priv->display.update_wm = skl_update_wm;
		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

Line 6845... Line 7137...

		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = cherryview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,

Line 6901... Line 7195...

	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_DATA1, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

Line 6925... Line 7220...

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

Line 6948... Line 7243...

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
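The mailbox protocol above is symmetric for reads and writes: the payload
goes through GEN6_PCODE_DATA, the command plus GEN6_PCODE_READY through
GEN6_PCODE_MAILBOX, and completion is signalled by the READY bit clearing
(the polling loop sits in the elided hunk). A hedged usage sketch, assuming
GEN6_PCODE_READ_MIN_FREQ_TABLE is a valid command and rps.hw_lock is the
required lock:

/*
 * Sketch of a pcode transaction; *val carries the argument in and the
 * result out.
 */
static int sketch_pcode_query(struct drm_i915_private *dev_priv, u32 *val)
{
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv,
				     GEN6_PCODE_READ_MIN_FREQ_TABLE, val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret;
}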

static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
	switch (czclk_freq) {
	case 200:
		return 10;
	case 267:
		return 12;
	case 320:
	case 333:
		return 16;
	case 400:
		return 20;
	default:
		return -1;
	}
}

static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;

	return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = dev_priv->rps.cz_freq;

	div = vlv_gpu_freq_div(czclk_freq) / 2;
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
Line 7076... Line 7339...

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
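
Tying the conversion helpers above together, here is a standalone
arithmetic check of the byt_gpu_freq()/byt_freq_opcode() round trip,
assuming mem_freq = 1066 so czclk_freq works out to about 267 and the
divider to 12; this is plain userspace C, not driver code.

#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_CLOSEST for positive values. */
static int div_round_closest(int n, int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	int czclk = 267, div = 12, opcode = 0xc6;

	/* byt_gpu_freq(): opcode -> MHz */
	int mhz = div_round_closest(czclk * (opcode + 6 - 0xbd), div);
	/* byt_freq_opcode(): MHz -> opcode, recovering the input */
	int back = div_round_closest(div * mhz, czclk) + 0xbd - 6;

	printf("opcode 0x%x -> %d MHz -> opcode 0x%x\n", opcode, mhz, back);
	return 0;
}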