Subversion Repositories Kolibri OS, Rev 6084
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as
 * the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 */

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static bool is_edp_psr(struct intel_dp *intel_dp)
{
	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
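
/*
 * Assumed context, defined outside this file: psr_dpcd is expected to be
 * filled from the sink's DPCD PSR capability registers (starting at
 * DP_PSR_SUPPORT) when the panel is probed, so byte 0 here is the PSR
 * capability/version field.
 */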

static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val;

	val = I915_READ(VLV_PSRSTAT(pipe)) &
	      VLV_EDP_PSR_CURR_STATE_MASK;
	return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
	       (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
}

static void intel_psr_write_vsc(struct intel_dp *intel_dp,
				const struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before programming the video DIP data
	   buffer registers for the DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < sizeof(*vsc_psr); i += 4) {
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), *data);
		data++;
	}
	for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4)
		I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder,
						   i >> 2), 0);

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}

static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	uint32_t val;

	/* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */
	val  = I915_READ(VLV_VSCSDP(pipe));
	val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
	val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
	I915_WRITE(VLV_VSCSDP(pipe), val);
}

static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;

	/* Prepare VSC Header for SU as per eDP 1.4 spec, Table 6.11 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x3;
	psr_vsc.sdp_header.HB3 = 0xb;
	intel_psr_write_vsc(intel_dp, &psr_vsc);
}

static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
	struct edp_vsc_psr psr_vsc;

	/* Prepare VSC packet as per eDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_psr_write_vsc(intel_dp, &psr_vsc);
}
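
/*
 * A note on the two VSC SDP headers built above (our reading of the
 * DisplayPort SDP header layout): HB1 = 0x7 is the VSC packet type,
 * HB2 is the revision (0x2 for the eDP 1.3 PSR packet, 0x3 for the
 * eDP 1.4 PSR2/SU packet) and HB3 is the number of valid payload
 * bytes (0x8 and 0xb respectively).
 */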

static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}

static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return DP_AUX_CH_CTL(port);
	else
		return EDP_PSR_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return DP_AUX_CH_DATA(port, index);
	else
		return EDP_PSR_AUX_DATA(index);
}

static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	i915_reg_t aux_ctl_reg;
	int precharge = 0x3;
	static const uint8_t aux_msg[] = {
		[0] = DP_AUX_NATIVE_WRITE << 4,
		[1] = DP_SET_POWER >> 8,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	enum port port = dig_port->port;
	int i;

	BUILD_BUG_ON(sizeof(aux_msg) > 20);

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable AUX frame sync at sink */
	if (dev_priv->psr.aux_frame_sync)
		drm_dp_dpcd_writeb(&intel_dp->aux,
				DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
				DP_AUX_FRAME_SYNC_ENABLE);

	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);

	/* Setup AUX registers */
	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

	if (INTEL_INFO(dev)->gen >= 9) {
		uint32_t val;

		val = I915_READ(aux_ctl_reg);
		val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
		val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
		val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
		val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
		/* Use hardcoded data values for PSR, frame sync and GTC */
		val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
		val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
		val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
		I915_WRITE(aux_ctl_reg, val);
	} else {
		I915_WRITE(aux_ctl_reg,
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
	}

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
}
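
/*
 * A note on aux_msg above (our reading of the DP AUX message format):
 * byte 0 carries the request type (native write) in its high nibble,
 * bytes 1-2 the DPCD address (DP_SET_POWER, 0x600), byte 3 the payload
 * length minus one (a single byte) and byte 4 the payload
 * (DP_SET_POWER_D0). This is the same wake-up write that the VLV path
 * in intel_psr_exit() issues manually; here it is handed to the
 * hardware so it can be replayed when exiting PSR.
 */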

static void vlv_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
	I915_WRITE(VLV_PSRCTL(pipe),
		   VLV_EDP_PSR_MODE_SW_TIMER |
		   VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
		   VLV_EDP_PSR_ENABLE);
}

static void vlv_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* Let's do the transition from PSR_state 1 to PSR_state 2,
	 * that is PSR transition to active - static frame transmission.
	 * Then the hardware is responsible for the transition to PSR_state 3,
	 * that is PSR active - no Remote Frame Buffer (RFB) update.
	 */
	I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
		   VLV_EDP_PSR_ACTIVE_ENTRY);
}

static void hsw_psr_enable_source(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	uint32_t max_sleep_time = 0x1f;
	/*
	 * Let's respect VBT in case VBT asks for a higher idle_frame value.
	 * Let's use 6 as the minimum to cover all known cases including
	 * the off-by-one issue that HW has in some cases. Also there are
	 * cases where the sink should be able to train
	 * with the 5 or 6 idle patterns.
	 */
	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
	uint32_t val = EDP_PSR_ENABLE;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;

	if (IS_HASWELL(dev))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
		val |= EDP_PSR_TP1_TIME_2500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
		val |= EDP_PSR_TP1_TIME_500us;
	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
		val |= EDP_PSR_TP1_TIME_100us;
	else
		val |= EDP_PSR_TP1_TIME_0us;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_0us;

	if (intel_dp_source_supports_hbr2(intel_dp) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP1_TP3_SEL;
	else
		val |= EDP_PSR_TP1_TP2_SEL;

	I915_WRITE(EDP_PSR_CTL, val);

	if (!dev_priv->psr.psr2_support)
		return;

	/* FIXME: selective update is probably totally broken because it doesn't
	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
	 * good enough. */
	val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;

	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
		val |= EDP_PSR2_TP2_TIME_2500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
		val |= EDP_PSR2_TP2_TIME_500;
	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
		val |= EDP_PSR2_TP2_TIME_100;
	else
		val |= EDP_PSR2_TP2_TIME_50;

	I915_WRITE(EDP_PSR2_CTL, val);
}
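
/*
 * Worked example for the EDP_PSR_CTL value composed above, using
 * hypothetical VBT numbers (idle_frames = 0, tp1 and tp2/tp3 wakeup
 * times = 0): idle_frames is raised to the minimum of 6, giving
 * EDP_PSR_ENABLE | (0x1f << EDP_PSR_MAX_SLEEP_TIME_SHIFT) |
 * (6 << EDP_PSR_IDLE_FRAME_SHIFT) | EDP_PSR_TP1_TIME_0us |
 * EDP_PSR_TP2_TP3_TIME_0us, plus one of the TP1_TP2/TP1_TP3 pattern
 * selects and, on Haswell, EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES.
 */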

static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	lockdep_assert_held(&dev_priv->psr.lock);
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	dev_priv->psr.source_ok = false;

	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
		return false;
	}

	if (!i915.enable_psr) {
		DRM_DEBUG_KMS("PSR disable by flag\n");
		return false;
	}

	if (IS_HASWELL(dev) &&
	    I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
		      S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return false;
	}

	if (IS_HASWELL(dev) &&
	    intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return false;
	}

	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
	    ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) {
		DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
		return false;
	}

	dev_priv->psr.source_ok = true;
	return true;
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	WARN_ON(dev_priv->psr.active);
	lockdep_assert_held(&dev_priv->psr.lock);

	/* Enable/Re-enable PSR on the host */
	if (HAS_DDI(dev))
		/* On HSW+ after we enable PSR on the source it will activate
		 * as soon as it matches the configured idle_frame count. So
		 * we actually enable it here, at activation time.
		 */
		hsw_psr_enable_source(intel_dp);
	else
		vlv_psr_activate(intel_dp);

	dev_priv->psr.active = true;
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);

	if (!HAS_PSR(dev)) {
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
		return;
	}

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		return;
	}

	mutex_lock(&dev_priv->psr.lock);
	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR already in use\n");
		goto unlock;
	}

	if (!intel_psr_match_conditions(intel_dp))
		goto unlock;

	dev_priv->psr.busy_frontbuffer_bits = 0;

	if (HAS_DDI(dev)) {
		hsw_psr_setup_vsc(intel_dp);

		if (dev_priv->psr.psr2_support) {
			/* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
			if (crtc->config->pipe_src_w > 3200 ||
			    crtc->config->pipe_src_h > 2000)
				dev_priv->psr.psr2_support = false;
			else
				skl_psr_setup_su_vsc(intel_dp);
		}

		/*
		 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
		 * Also mask LPSP to avoid a dependency on other drivers that
		 * might block runtime_pm, besides preventing other hw tracking
		 * issues now that we can rely on frontbuffer tracking.
		 */
		I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
			   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

		/* Enable PSR on the panel */
		hsw_psr_enable_sink(intel_dp);

		if (INTEL_INFO(dev)->gen >= 9)
			intel_psr_activate(intel_dp);
	} else {
		vlv_psr_setup_vsc(intel_dp);

		/* Enable PSR on the panel */
		vlv_psr_enable_sink(intel_dp);

		/* On HSW+ enable_source also means going to the PSR
		 * entry/active state as soon as the idle_frame count is
		 * reached, and here that would be too soon. However on VLV
		 * enable_source just enables PSR but leaves it in the
		 * inactive state. So we can do this prior to the active
		 * transition, i.e. here.
		 */
		vlv_psr_enable_source(intel_dp);
	}

	/*
	 * FIXME: Activation should happen immediately since this function
	 * is just called after the pipe is fully trained and enabled.
	 * However on every platform we face issues when the first activation
	 * follows a modeset so quickly.
	 *     - On VLV/CHV we get a black screen on first activation
	 *     - On HSW/BDW we get a recoverable frozen screen until the next
	 *       exit-activate sequence.
	 */
	if (INTEL_INFO(dev)->gen < 9)
		schedule_delayed_work(&dev_priv->psr.work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));

	dev_priv->psr.enabled = intel_dp;
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}
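
/*
 * Illustrative sketch only, not part of the original source: a
 * hypothetical eDP encoder path would pair intel_psr_enable() and
 * intel_psr_disable() roughly like this - enable only once the pipe is
 * fully trained and running, disable before the pipe goes down.
 */
#if 0
static void example_edp_encoder_path(struct intel_dp *intel_dp)
{
	/* ... link training and pipe enable happen first ... */
	intel_psr_enable(intel_dp);

	/* ... later, before the pipe gets disabled again ... */
	intel_psr_disable(intel_dp);
}
#endif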

static void vlv_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t val;

	if (dev_priv->psr.active) {
		/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
		if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
			      VLV_EDP_PSR_IN_TRANS) == 0, 1))
			WARN(1, "PSR transition took longer than expected\n");

		val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		val &= ~VLV_EDP_PSR_ENABLE;
		val &= ~VLV_EDP_PSR_MODE_MASK;
		I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);

		dev_priv->psr.active = false;
	} else {
		WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
	}
}

static void hsw_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->psr.active) {
		I915_WRITE(EDP_PSR_CTL,
			   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);

		/* Wait till PSR is idle */
		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
			DRM_ERROR("Timed out waiting for PSR Idle State\n");

		dev_priv->psr.active = false;
	} else {
		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
	}
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	/* Disable PSR on Source */
	if (HAS_DDI(dev))
		hsw_psr_disable(intel_dp);
	else
		vlv_psr_disable(intel_dp);

	/* Disable PSR on Sink */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

	dev_priv->psr.enabled = NULL;
	mutex_unlock(&dev_priv->psr.lock);

	cancel_delayed_work_sync(&dev_priv->psr.work);
}

static void intel_psr_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), psr.work.work);
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	/* We have to make sure PSR is ready for re-enable
	 * otherwise it stays disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (HAS_DDI(dev_priv->dev)) {
		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
			      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	} else {
		if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
			      VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
			return;
		}
	}
	mutex_lock(&dev_priv->psr.lock);
	intel_dp = dev_priv->psr.enabled;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (dev_priv->psr.busy_frontbuffer_bits)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	u32 val;

	if (!dev_priv->psr.active)
		return;

	if (HAS_DDI(dev)) {
		val = I915_READ(EDP_PSR_CTL);

		WARN_ON(!(val & EDP_PSR_ENABLE));

		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
	} else {
		val = I915_READ(VLV_PSRCTL(pipe));

		/* Here we do the transition directly from PSR_state 3 (active,
		 * no RFB update) to PSR_state 5 (exit), since PSR_state 4
		 * (active with single frame update) can be skipped. From
		 * PSR_state 5 the hardware is responsible for transitioning
		 * back to PSR_state 1 (inactive), the same state as after
		 * vlv_psr_enable_source.
		 */
		val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
		I915_WRITE(VLV_PSRCTL(pipe), val);

		/* Send AUX wake up - the spec says that after transitioning to
		 * PSR active we have to send an AUX wake up by writing 01h to
		 * DPCD 600h of the sink device.
		 * XXX: This might slow down the transition, but without this
		 * HW doesn't complete the transition to PSR_state 1 and we
		 * never get the screen updated.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
				   DP_SET_POWER_D0);
	}

	dev_priv->psr.active = false;
}

/**
 * intel_psr_single_frame_update - Single Frame Update
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Some platforms support a single frame update feature that is used to
 * send and update only one frame on the Remote Frame Buffer.
 * So far it is only implemented for Valleyview and Cherryview because
 * hardware requires this to be done before a page flip.
 */
void intel_psr_single_frame_update(struct drm_device *dev,
				   unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;
	u32 val;

	/*
	 * Single frame update is already supported on BDW+ but it requires
	 * many W/A and it isn't really needed.
	 */
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
		return;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
		val = I915_READ(VLV_PSRCTL(pipe));

		/*
		 * We need to set this bit before writing registers for a flip.
		 * This bit is self-clearing when it gets to the PSR active state.
		 */
		I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
	}
	mutex_unlock(&dev_priv->psr.lock);
}
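
/*
 * Assumed usage, the caller lives outside this file: the page-flip path
 * is expected to call intel_psr_single_frame_update() before writing the
 * flip registers, matching the "before a page flip" requirement in the
 * kernel-doc above.
 */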

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_device *dev,
			  unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;

	if (frontbuffer_bits)
		intel_psr_exit(dev);

	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_device *dev,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	mutex_lock(&dev_priv->psr.lock);
	if (!dev_priv->psr.enabled) {
		mutex_unlock(&dev_priv->psr.lock);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* By definition flush = invalidate + flush */
	if (frontbuffer_bits)
		intel_psr_exit(dev);
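
	/*
	 * Note: upstream i915 re-schedules psr.work at this point to
	 * re-enable PSR once the frontbuffer goes idle; that call appears
	 * to be commented out in this KolibriOS port (below), so PSR is
	 * not re-armed from the flush path here.
	 */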

//   if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
//       if (!work_busy(&dev_priv->psr.work.work))
//       schedule_delayed_work(&dev_priv->psr.work,
//                         msecs_to_jiffies(100));
	mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev: DRM device
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;

	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
	mutex_init(&dev_priv->psr.lock);
}