Subversion Repositories Kolibri OS


Diff: Rev 6084 -> Rev 6937
@@ -78,10 +78,10 @@
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
-	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
+	i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
 	uint32_t *data = (uint32_t *) vsc_psr;
 	unsigned int i;
 
 	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
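Note: the u32 -> i915_reg_t change here (and repeated throughout this diff) comes from the upstream i915 type-safe register work. A minimal sketch of the idea, assuming the upstream definition:

	typedef struct {
		uint32_t reg;
	} i915_reg_t;

	#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

With register offsets wrapped in a distinct struct type, passing a raw offset (or a register value) where I915_READ()/I915_WRITE() expect a register fails to compile instead of silently producing bogus MMIO accesses.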
@@ -149,40 +149,53 @@
 {
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
 			   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 }
+
+static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
+				       enum port port)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return DP_AUX_CH_CTL(port);
+	else
+		return EDP_PSR_AUX_CTL;
+}
+
+static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
+					enum port port, int index)
+{
+	if (INTEL_INFO(dev_priv)->gen >= 9)
+		return DP_AUX_CH_DATA(port, index);
+	else
+		return EDP_PSR_AUX_DATA(index);
+}
 
 static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t aux_clock_divider;
-	uint32_t aux_data_reg, aux_ctl_reg;
+	i915_reg_t aux_ctl_reg;
 	int precharge = 0x3;
 	static const uint8_t aux_msg[] = {
 		[0] = DP_AUX_NATIVE_WRITE << 4,
 		[1] = DP_SET_POWER >> 8,
 		[2] = DP_SET_POWER & 0xff,
 		[3] = 1 - 1,
 		[4] = DP_SET_POWER_D0,
 	};
+	enum port port = dig_port->port;
 	int i;
 
 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
 
 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
-	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-			   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
-
 	/* Enable AUX frame sync at sink */
 	if (dev_priv->psr.aux_frame_sync)
 		drm_dp_dpcd_writeb(&intel_dp->aux,
 				DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
 				DP_AUX_FRAME_SYNC_ENABLE);
 
-	aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
-				DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
-	aux_ctl_reg = (INTEL_INFO(dev)->gen >= 9) ?
-				DPA_AUX_CH_CTL : EDP_PSR_AUX_CTL(dev);
+	aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
 
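Note: the two new helpers centralize the gen >= 9 choice between the port's own AUX channel registers and the dedicated EDP_PSR_AUX_* block, replacing the open-coded ternaries. Presumably the rest of hsw_psr_enable_sink() (not shown in this hunk) then writes the aux_msg buffer through psr_aux_data_reg(), roughly as the upstream kernel does:

	for (i = 0; i < sizeof(aux_msg); i += 4)
		I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
			   intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));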
@@ -252,38 +265,67 @@
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	uint32_t max_sleep_time = 0x1f;
-	/* Lately it was identified that depending on panel idle frame count
-	 * calculated at HW can be off by 1. So let's use what came
-	 * from VBT + 1.
-	 * There are also other cases where panel demands at least 4
-	 * but VBT is not being set. To cover these 2 cases lets use
-	 * at least 5 when VBT isn't set to be on the safest side.
-	 */
-	uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ?
-			       dev_priv->vbt.psr.idle_frames + 1 : 5;
-	uint32_t val = 0x0;
-	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
-
-	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
-		/* It doesn't mean we shouldn't send TPS patters, so let's
-		   send the minimal TP1 possible and skip TP2. */
-		val |= EDP_PSR_TP1_TIME_100us;
-		val |= EDP_PSR_TP2_TP3_TIME_0us;
-		val |= EDP_PSR_SKIP_AUX_EXIT;
-		/* Sink should be able to train with the 5 or 6 idle patterns */
-		idle_frames += 4;
-	}
-
-	I915_WRITE(EDP_PSR_CTL(dev), val |
-		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
-		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
-		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
-		   EDP_PSR_ENABLE);
-
-	if (dev_priv->psr.psr2_support)
-		I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
-				EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
+	/*
+	 * Let's respect VBT in case VBT asks a higher idle_frame value.
+	 * Let's use 6 as the minimum to cover all known cases including
+	 * the off-by-one issue that HW has in some cases. Also there are
+	 * cases where sink should be able to train
+	 * with the 5 or 6 idle patterns.
+	 */
+	uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+	uint32_t val = EDP_PSR_ENABLE;
+
+	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+
+	if (IS_HASWELL(dev))
+		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+	if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
+		val |= EDP_PSR_TP1_TIME_2500us;
+	else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
+		val |= EDP_PSR_TP1_TIME_500us;
+	else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
+		val |= EDP_PSR_TP1_TIME_100us;
+	else
+		val |= EDP_PSR_TP1_TIME_0us;
+
+	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
+		val |= EDP_PSR_TP2_TP3_TIME_2500us;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
+		val |= EDP_PSR_TP2_TP3_TIME_500us;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+		val |= EDP_PSR_TP2_TP3_TIME_100us;
+	else
+		val |= EDP_PSR_TP2_TP3_TIME_0us;
+
+	if (intel_dp_source_supports_hbr2(intel_dp) &&
+	    drm_dp_tps3_supported(intel_dp->dpcd))
+		val |= EDP_PSR_TP1_TP3_SEL;
+	else
+		val |= EDP_PSR_TP1_TP2_SEL;
+
+	I915_WRITE(EDP_PSR_CTL, val);
+
+	if (!dev_priv->psr.psr2_support)
+		return;
+
+	/* FIXME: selective update is probably totally broken because it doesn't
+	 * mesh at all with our frontbuffer tracking. And the hw alone isn't
+	 * good enough. */
+	val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+
+	if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
+		val |= EDP_PSR2_TP2_TIME_2500;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
+		val |= EDP_PSR2_TP2_TIME_500;
+	else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+		val |= EDP_PSR2_TP2_TIME_100;
+	else
+		val |= EDP_PSR2_TP2_TIME_50;
+
+	I915_WRITE(EDP_PSR2_CTL, val);
 }
 
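Note: the rewritten hsw_psr_enable_source() now honours the VBT wakeup times instead of hard-coding TP1/TP2 behaviour. The bucketing above, assuming the VBT fields are in units of 100 us:

	/*
	 *   tp*_wakeup_time > 5  (more than 500 us) -> *_TIME_2500us
	 *   tp*_wakeup_time 2..5                    -> *_TIME_500us
	 *   tp*_wakeup_time == 1                    -> *_TIME_100us
	 *   tp*_wakeup_time == 0                    -> *_TIME_0us
	 */

so, for example, a VBT value of 2 (about 200 us) is rounded up to the 500 us bucket. The TP1 vs TP3 choice likewise follows capability: TP3 training patterns are selected only when the source supports HBR2 and the sink advertises TPS3.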
@@ -322,10 +364,10 @@
 	    intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
 		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
 		return false;
 	}
 
-	if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) ||
-				    (dig_port->port != PORT_A))) {
+	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+	    ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) {
 		DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
 		return false;
 	}
@@ -338,9 +380,9 @@
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+	WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
 	WARN_ON(dev_priv->psr.active);
 	lockdep_assert_held(&dev_priv->psr.lock);
 
@@ -401,10 +443,15 @@
 				dev_priv->psr.psr2_support = false;
 			else
 				skl_psr_setup_su_vsc(intel_dp);
 		}
 
-		/* Avoid continuous PSR exit by masking memup and hpd */
-		I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
-			   EDP_PSR_DEBUG_MASK_HPD);
+		/*
+		 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
+		 * Also mask LPSP to avoid dependency on other drivers that
+		 * might block runtime_pm besides preventing other hw tracking
+		 * issues now we can rely on frontbuffer tracking.
+		 */
+		I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+			   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
 		/* Enable PSR on the panel */
@@ -425,8 +472,21 @@
 		 * to active transition, i.e. here.
 		 */
 		vlv_psr_enable_source(intel_dp);
 	}
+
+	/*
+	 * FIXME: Activation should happen immediately since this function
+	 * is just called after pipe is fully trained and enabled.
+	 * However on every platform we face issues when first activation
+	 * follows a modeset so quickly.
+	 *     - On VLV/CHV we get bank screen on first activation
+	 *     - On HSW/BDW we get a recoverable frozen screen until next
+	 *       exit-activate sequence.
+	 */
+	if (INTEL_INFO(dev)->gen < 9)
+		schedule_delayed_work(&dev_priv->psr.work,
+				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
 
 	dev_priv->psr.enabled = intel_dp;
 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
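Note: the new block delays the first activation on pre-gen9 by re-using dev_priv->psr.work rather than activating inline after the modeset. The consumer of that delayed work is intel_psr_work(), whose idle-wait portion appears in the Line 521 hunk below; a heavily simplified sketch of its shape, assuming the upstream layout:

	static void intel_psr_work(struct work_struct *work)
	{
		struct drm_i915_private *dev_priv =
			container_of(work, typeof(*dev_priv), psr.work.work);

		/* (upstream first waits for the PSR idle state, see below) */
		mutex_lock(&dev_priv->psr.lock);
		if (dev_priv->psr.enabled && !dev_priv->psr.busy_frontbuffer_bits)
			intel_psr_activate(dev_priv->psr.enabled);
		mutex_unlock(&dev_priv->psr.lock);
	}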
@@ -464,17 +524,17 @@
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->psr.active) {
-		I915_WRITE(EDP_PSR_CTL(dev),
-			   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+		I915_WRITE(EDP_PSR_CTL,
+			   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
 
 		/* Wait till PSR is idle */
-		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+		if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
 			       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
 			DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
 		dev_priv->psr.active = false;
 	} else {
-		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+		WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
 	}
@@ -496,12 +556,16 @@
 	if (!dev_priv->psr.enabled) {
 		mutex_unlock(&dev_priv->psr.lock);
 		return;
 	}
 
+	/* Disable PSR on Source */
 	if (HAS_DDI(dev))
 		hsw_psr_disable(intel_dp);
 	else
 		vlv_psr_disable(intel_dp);
 
+	/* Disable PSR on Sink */
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
 	dev_priv->psr.enabled = NULL;
 	mutex_unlock(&dev_priv->psr.lock);
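Note: writing 0 to DP_PSR_EN_CFG (DPCD 0x170) is the mirror image of the sink enable at Line 149, which sets DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE. A hypothetical symmetric helper makes the pairing explicit (psr_set_sink_state is an illustrative name, not part of the driver):

	static void psr_set_sink_state(struct intel_dp *intel_dp, bool enable)
	{
		/* 0 clears DP_PSR_ENABLE at the panel, fully disarming sink PSR */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
				   enable ? DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE : 0);
	}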
@@ -521,11 +585,11 @@
 	 * otherwise it keeps disabled until next full enable/disable cycle.
 	 * PSR might take some time to get fully disabled
 	 * and be ready for re-enable.
 	 */
 	if (HAS_DDI(dev_priv->dev)) {
-		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
+		if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
 			      EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
 			DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
 			return;
 		}
 	} else {
@@ -564,10 +628,10 @@
 
 	if (!dev_priv->psr.active)
 		return;
 
 	if (HAS_DDI(dev)) {
-		val = I915_READ(EDP_PSR_CTL(dev));
+		val = I915_READ(EDP_PSR_CTL);
 
 		WARN_ON(!(val & EDP_PSR_ENABLE));
 
-		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+		I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
@@ -618,9 +682,9 @@
 
 	/*
 	 * Single frame update is already supported on BDW+ but it requires
 	 * many W/A and it isn't really needed.
 	 */
-	if (!IS_VALLEYVIEW(dev))
+	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
@@ -698,10 +762,9 @@
 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 	enum pipe pipe;
-	int delay_ms = HAS_DDI(dev) ? 100 : 500;
 
 	mutex_lock(&dev_priv->psr.lock);
 	if (!dev_priv->psr.enabled) {
 		mutex_unlock(&dev_priv->psr.lock);
@@ -712,29 +775,14 @@
 	pipe = to_intel_crtc(crtc)->pipe;
 
 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
-	if (HAS_DDI(dev)) {
-		/*
-		 * By definition every flush should mean invalidate + flush,
-		 * however on core platforms let's minimize the
-		 * disable/re-enable so we can avoid the invalidate when flip
-		 * originated the flush.
-		 */
-		if (frontbuffer_bits && origin != ORIGIN_FLIP)
-			intel_psr_exit(dev);
-	} else {
-		/*
-		 * On Valleyview and Cherryview we don't use hardware tracking
-		 * so any plane updates or cursor moves don't result in a PSR
-		 * invalidating. Which means we need to manually fake this in
-		 * software for all flushes.
-		 */
-		if (frontbuffer_bits)
-			intel_psr_exit(dev);
-	}
-
-	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-		schedule_delayed_work(&dev_priv->psr.work,
-				      msecs_to_jiffies(delay_ms));
+	/* By definition flush = invalidate + flush */
+	if (frontbuffer_bits)
+		intel_psr_exit(dev);
+
+//   if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+//       if (!work_busy(&dev_priv->psr.work.work))
+//       schedule_delayed_work(&dev_priv->psr.work,
+//                         msecs_to_jiffies(100));
 	mutex_unlock(&dev_priv->psr.lock);
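Note a KolibriOS-specific divergence in this hunk: upstream keeps the schedule_delayed_work() call that re-arms PSR once the flush has settled, but the port comments it out with // markers, presumably because the delayed-work path is not serviced the same way in this kernel port. As written, a flush exits PSR but never re-activates it from here.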
@@ -749,7 +797,10 @@
  */
 void intel_psr_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
+		HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
 
 	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
 	mutex_init(&dev_priv->psr.lock);
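Note: the new psr_mmio_base field is what lets the PSR register macros throughout this diff drop their (dev) argument: EDP_PSR_CTL and friends become fixed offsets from the per-platform base chosen here (Haswell has its own block, Broadwell and newer another). A rough sketch, assuming the upstream definitions (offsets illustrative):

	#define EDP_PSR_CTL		_MMIO(dev_priv->psr_mmio_base + 0)
	#define EDP_PSR_AUX_CTL		_MMIO(dev_priv->psr_mmio_base + 0x10)
	#define EDP_PSR_STATUS_CTL	_MMIO(dev_priv->psr_mmio_base + 0x40)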