Subversion Repositories: Kolibri OS

Rev 6296 vs Rev 6937
Line 47... Line 47...
47
 * generic functions to the driver for grabbing and releasing references for
47
 * generic functions to the driver for grabbing and releasing references for
48
 * abstract power domains. It then maps those to the actual power wells
48
 * abstract power domains. It then maps those to the actual power wells
49
 * present for a given platform.
49
 * present for a given platform.
50
 */
50
 */
Line 51... Line -...
51
 
-
 
52
#define GEN9_ENABLE_DC5(dev) 0
-
 
53
#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
-
 
54
 
51
 
55
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
52
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
56
	for (i = 0;							\
53
	for (i = 0;							\
57
	     i < (power_domains)->power_well_count &&			\
54
	     i < (power_domains)->power_well_count &&			\
58
		 ((power_well) = &(power_domains)->power_wells[i]);	\
55
		 ((power_well) = &(power_domains)->power_wells[i]);	\
59
	     i++)							\
56
	     i++)							\
Line 60... Line 57...
60
		if ((power_well)->domains & (domain_mask))
57
		for_each_if ((power_well)->domains & (domain_mask))
61
 
58
 
62
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
59
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
63
	for (i = (power_domains)->power_well_count - 1;			 \
60
	for (i = (power_domains)->power_well_count - 1;			 \
64
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
61
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
Line 65... Line 62...
65
	     i--)							 \
62
	     i--)							 \
66
		if ((power_well)->domains & (domain_mask))
63
		for_each_if ((power_well)->domains & (domain_mask))
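Both iterator macros above walk the power_domains->power_wells[] array and visit only the wells whose domains mask intersects the requested domain_mask; the newer revision simply wraps that trailing filter in for_each_if, an i915 helper intended to avoid dangling-else surprises in callers. A minimal standalone sketch of the same filtered-iteration pattern, using invented stand-in types rather than the driver's real definitions:

#include <stdio.h>

struct fake_power_well { const char *name; unsigned long domains; };
struct fake_power_domains {
	struct fake_power_well *power_wells;
	int power_well_count;
};

#define BIT(n) (1UL << (n))

/* Same shape as for_each_power_well(): iterate the array, then filter by
 * mask. A bare if is used here for brevity; the driver's for_each_if
 * variant exists to sidestep dangling-else ambiguity. */
#define for_each_fake_power_well(i, pw, mask, pd)			\
	for ((i) = 0;							\
	     (i) < (pd)->power_well_count &&				\
	     ((pw) = &(pd)->power_wells[(i)]);				\
	     (i)++)							\
		if ((pw)->domains & (mask))

int main(void)
{
	struct fake_power_well wells[] = {
		{ "always-on",    ~0UL },
		{ "power well 2", BIT(3) | BIT(4) },
		{ "ddi-b",        BIT(5) },
	};
	struct fake_power_domains pd = { wells, 3 };
	struct fake_power_well *pw;
	int i;

	/* Only wells whose mask intersects BIT(4) are visited;
	 * "ddi-b" (BIT(5)) is skipped. */
	for_each_fake_power_well(i, pw, BIT(4), &pd)
		printf("matched: %s\n", pw->name);
	return 0;
}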
Line -... Line 64...
-
 
64
 
-
 
65
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
-
 
66
				    int power_well_id);
-
 
67
 
-
 
68
const char *
-
 
69
intel_display_power_domain_str(enum intel_display_power_domain domain)
-
 
70
{
-
 
71
	switch (domain) {
-
 
72
	case POWER_DOMAIN_PIPE_A:
-
 
73
		return "PIPE_A";
-
 
74
	case POWER_DOMAIN_PIPE_B:
-
 
75
		return "PIPE_B";
-
 
76
	case POWER_DOMAIN_PIPE_C:
-
 
77
		return "PIPE_C";
-
 
78
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-
 
79
		return "PIPE_A_PANEL_FITTER";
-
 
80
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-
 
81
		return "PIPE_B_PANEL_FITTER";
-
 
82
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-
 
83
		return "PIPE_C_PANEL_FITTER";
-
 
84
	case POWER_DOMAIN_TRANSCODER_A:
-
 
85
		return "TRANSCODER_A";
-
 
86
	case POWER_DOMAIN_TRANSCODER_B:
-
 
87
		return "TRANSCODER_B";
-
 
88
	case POWER_DOMAIN_TRANSCODER_C:
-
 
89
		return "TRANSCODER_C";
-
 
90
	case POWER_DOMAIN_TRANSCODER_EDP:
-
 
91
		return "TRANSCODER_EDP";
-
 
92
	case POWER_DOMAIN_PORT_DDI_A_LANES:
-
 
93
		return "PORT_DDI_A_LANES";
-
 
94
	case POWER_DOMAIN_PORT_DDI_B_LANES:
-
 
95
		return "PORT_DDI_B_LANES";
-
 
96
	case POWER_DOMAIN_PORT_DDI_C_LANES:
-
 
97
		return "PORT_DDI_C_LANES";
-
 
98
	case POWER_DOMAIN_PORT_DDI_D_LANES:
-
 
99
		return "PORT_DDI_D_LANES";
-
 
100
	case POWER_DOMAIN_PORT_DDI_E_LANES:
-
 
101
		return "PORT_DDI_E_LANES";
-
 
102
	case POWER_DOMAIN_PORT_DSI:
-
 
103
		return "PORT_DSI";
-
 
104
	case POWER_DOMAIN_PORT_CRT:
-
 
105
		return "PORT_CRT";
-
 
106
	case POWER_DOMAIN_PORT_OTHER:
-
 
107
		return "PORT_OTHER";
-
 
108
	case POWER_DOMAIN_VGA:
-
 
109
		return "VGA";
-
 
110
	case POWER_DOMAIN_AUDIO:
-
 
111
		return "AUDIO";
-
 
112
	case POWER_DOMAIN_PLLS:
-
 
113
		return "PLLS";
-
 
114
	case POWER_DOMAIN_AUX_A:
-
 
115
		return "AUX_A";
-
 
116
	case POWER_DOMAIN_AUX_B:
-
 
117
		return "AUX_B";
-
 
118
	case POWER_DOMAIN_AUX_C:
-
 
119
		return "AUX_C";
-
 
120
	case POWER_DOMAIN_AUX_D:
-
 
121
		return "AUX_D";
-
 
122
	case POWER_DOMAIN_GMBUS:
-
 
123
		return "GMBUS";
-
 
124
	case POWER_DOMAIN_INIT:
-
 
125
		return "INIT";
-
 
126
	case POWER_DOMAIN_MODESET:
-
 
127
		return "MODESET";
-
 
128
	default:
-
 
129
		MISSING_CASE(domain);
67
 
130
		return "?";
68
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
131
	}
69
				    int power_well_id);
132
}
70
 
133
 
71
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
134
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
Line 242... Line 305...
242
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
305
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
Line 243... Line 306...
243
 
306
 
244
		gen8_irq_power_well_post_enable(dev_priv,
307
		gen8_irq_power_well_post_enable(dev_priv,
245
						1 << PIPE_C | 1 << PIPE_B);
308
						1 << PIPE_C | 1 << PIPE_B);
246
	}
-
 
247
 
-
 
248
	if (power_well->data == SKL_DISP_PW_1) {
-
 
249
		if (!dev_priv->power_domains.initializing)
-
 
250
			intel_prepare_ddi(dev);
-
 
251
		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
-
 
252
	}
309
	}
Line 253... Line 310...
253
}
310
}
254
 
311
 
255
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
312
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
Line 290... Line 347...
290
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
347
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
291
	BIT(POWER_DOMAIN_PIPE_C) |			\
348
	BIT(POWER_DOMAIN_PIPE_C) |			\
292
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
349
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
293
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
350
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
294
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
351
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
295
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
-
 
296
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
352
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
297
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
-
 
298
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
353
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
299
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
-
 
300
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
354
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
301
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
355
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
302
	BIT(POWER_DOMAIN_AUX_B) |                       \
356
	BIT(POWER_DOMAIN_AUX_B) |                       \
303
	BIT(POWER_DOMAIN_AUX_C) |			\
357
	BIT(POWER_DOMAIN_AUX_C) |			\
304
	BIT(POWER_DOMAIN_AUX_D) |			\
358
	BIT(POWER_DOMAIN_AUX_D) |			\
305
	BIT(POWER_DOMAIN_AUDIO) |			\
359
	BIT(POWER_DOMAIN_AUDIO) |			\
306
	BIT(POWER_DOMAIN_VGA) |				\
360
	BIT(POWER_DOMAIN_VGA) |				\
307
	BIT(POWER_DOMAIN_INIT))
361
	BIT(POWER_DOMAIN_INIT))
308
#define SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
-
 
309
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
-
 
310
	BIT(POWER_DOMAIN_PLLS) |			\
-
 
311
	BIT(POWER_DOMAIN_PIPE_A) |			\
-
 
312
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
-
 
313
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
-
 
314
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
-
 
315
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
-
 
316
	BIT(POWER_DOMAIN_AUX_A) |			\
-
 
317
	BIT(POWER_DOMAIN_INIT))
-
 
318
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
362
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
319
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
-
 
320
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
363
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
321
	BIT(POWER_DOMAIN_PORT_DDI_E_2_LANES) |		\
364
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
322
	BIT(POWER_DOMAIN_INIT))
365
	BIT(POWER_DOMAIN_INIT))
323
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
366
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
324
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
-
 
325
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
367
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
326
	BIT(POWER_DOMAIN_INIT))
368
	BIT(POWER_DOMAIN_INIT))
327
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
369
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
328
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
-
 
329
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
370
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
330
	BIT(POWER_DOMAIN_INIT))
371
	BIT(POWER_DOMAIN_INIT))
331
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
372
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
332
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
-
 
333
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
373
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
334
	BIT(POWER_DOMAIN_INIT))
374
	BIT(POWER_DOMAIN_INIT))
335
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS (		\
375
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
336
	SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |		\
376
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
-
 
377
	BIT(POWER_DOMAIN_MODESET) |			\
337
	BIT(POWER_DOMAIN_PLLS) |			\
378
	BIT(POWER_DOMAIN_AUX_A) |			\
338
	BIT(POWER_DOMAIN_INIT))
379
	BIT(POWER_DOMAIN_INIT))
339
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
380
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
340
	(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
381
	(POWER_DOMAIN_MASK & ~(				\
341
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
382
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
342
	SKL_DISPLAY_DDI_A_E_POWER_DOMAINS |		\
-
 
343
	SKL_DISPLAY_DDI_B_POWER_DOMAINS |		\
-
 
344
	SKL_DISPLAY_DDI_C_POWER_DOMAINS |		\
-
 
345
	SKL_DISPLAY_DDI_D_POWER_DOMAINS |		\
-
 
346
	SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) |		\
383
	SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) |		\
347
	BIT(POWER_DOMAIN_INIT))
384
	BIT(POWER_DOMAIN_INIT))
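The always-on mask defined just above is built by complement: start from the full POWER_DOMAIN_MASK, remove every domain that a dedicated power well already covers (power well 2 and, in the new revision, the DC-off well), then add POWER_DOMAIN_INIT back in. A small standalone illustration of that composition, with an invented four-bit domain set rather than the driver's real domain list:

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Hypothetical domains for illustration only. */
#define DOMAIN_MASK	(BIT(0) | BIT(1) | BIT(2) | BIT(3))	/* all domains */
#define WELL_2_DOMAINS	(BIT(1) | BIT(2))			/* owned by a real well */
#define INIT_DOMAIN	BIT(3)

/* Same construction as SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS. */
#define ALWAYS_ON_DOMAINS ((DOMAIN_MASK & ~WELL_2_DOMAINS) | INIT_DOMAIN)

int main(void)
{
	/* Prints 0x9: BIT(0) and BIT(3) are left for the always-on well. */
	printf("always-on mask: 0x%lx\n", (unsigned long)ALWAYS_ON_DOMAINS);
	return 0;
}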
Line 348... Line 385...
348
 
385
 
349
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
386
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
350
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
387
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
351
	BIT(POWER_DOMAIN_PIPE_B) |			\
388
	BIT(POWER_DOMAIN_PIPE_B) |			\
352
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
389
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
353
	BIT(POWER_DOMAIN_PIPE_C) |			\
390
	BIT(POWER_DOMAIN_PIPE_C) |			\
354
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
391
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
355
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
392
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
356
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
-
 
357
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
393
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
358
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
-
 
359
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
394
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
360
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
395
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
361
	BIT(POWER_DOMAIN_AUX_B) |			\
396
	BIT(POWER_DOMAIN_AUX_B) |			\
362
	BIT(POWER_DOMAIN_AUX_C) |			\
397
	BIT(POWER_DOMAIN_AUX_C) |			\
363
	BIT(POWER_DOMAIN_AUDIO) |			\
398
	BIT(POWER_DOMAIN_AUDIO) |			\
364
	BIT(POWER_DOMAIN_VGA) |				\
399
	BIT(POWER_DOMAIN_VGA) |				\
Line 367... Line 402...
367
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
402
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
368
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
403
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
369
	BIT(POWER_DOMAIN_PIPE_A) |			\
404
	BIT(POWER_DOMAIN_PIPE_A) |			\
370
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
405
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
371
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
406
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
372
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
-
 
373
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
407
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
374
	BIT(POWER_DOMAIN_AUX_A) |			\
408
	BIT(POWER_DOMAIN_AUX_A) |			\
375
	BIT(POWER_DOMAIN_PLLS) |			\
409
	BIT(POWER_DOMAIN_PLLS) |			\
376
	BIT(POWER_DOMAIN_INIT))
410
	BIT(POWER_DOMAIN_INIT))
-
 
411
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
-
 
412
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
-
 
413
	BIT(POWER_DOMAIN_MODESET) |			\
-
 
414
	BIT(POWER_DOMAIN_AUX_A) |			\
-
 
415
	BIT(POWER_DOMAIN_INIT))
377
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
416
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
378
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
417
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
379
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
418
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
380
	BIT(POWER_DOMAIN_INIT))
419
	BIT(POWER_DOMAIN_INIT))
Line 415... Line 454...
415
	  *  set disable sequence was followed.
454
	  *  set disable sequence was followed.
416
	  * 2] Check if display uninitialize sequence is initialized.
455
	  * 2] Check if display uninitialize sequence is initialized.
417
	  */
456
	  */
418
}
457
}
Line -... Line 458...
-
 
458
 
419
 
459
static void gen9_set_dc_state_debugmask_memory_up(
420
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
460
			struct drm_i915_private *dev_priv)
421
{
461
{
Line -... Line 462...
-
 
462
	uint32_t val;
422
	uint32_t val;
463
 
-
 
464
	/* The below bit doesn't need to be cleared ever afterwards */
-
 
465
	val = I915_READ(DC_STATE_DEBUG);
-
 
466
	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
-
 
467
		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
-
 
468
		I915_WRITE(DC_STATE_DEBUG, val);
-
 
469
		POSTING_READ(DC_STATE_DEBUG);
Line -... Line 470...
-
 
470
	}
-
 
471
}
-
 
472
 
-
 
473
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
-
 
474
				u32 state)
-
 
475
{
-
 
476
	int rewrites = 0;
423
 
477
	int rereads = 0;
-
 
478
	u32 v;
-
 
479
 
-
 
480
	I915_WRITE(DC_STATE_EN, state);
-
 
481
 
-
 
482
	/* It has been observed that disabling the dc6 state sometimes
-
 
483
	 * doesn't stick and dmc keeps returning old value. Make sure
-
 
484
	 * the write really sticks enough times and also force rewrite until
-
 
485
	 * we are confident that state is exactly what we want.
-
 
486
	 */
-
 
487
	do  {
-
 
488
		v = I915_READ(DC_STATE_EN);
-
 
489
 
-
 
490
		if (v != state) {
-
 
491
			I915_WRITE(DC_STATE_EN, state);
-
 
492
			rewrites++;
-
 
493
			rereads = 0;
Line 424... Line 494...
424
	assert_can_enable_dc9(dev_priv);
494
		} else if (rereads++ > 5) {
-
 
495
			break;
-
 
496
		}
-
 
497
 
425
 
498
	} while (rewrites < 100);
-
 
499
 
-
 
500
	if (v != state)
426
	DRM_DEBUG_KMS("Enabling DC9\n");
501
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
-
 
502
			  state, v);
427
 
503
 
428
	val = I915_READ(DC_STATE_EN);
504
	/* Most of the times we need one retry, avoid spam */
Line 429... Line 505...
429
	val |= DC_STATE_EN_DC9;
505
	if (rewrites > 1)
430
	I915_WRITE(DC_STATE_EN, val);
506
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
431
	POSTING_READ(DC_STATE_EN);
507
			      state, rewrites);
-
 
508
}
Line -... Line 509...
-
 
509
 
432
}
510
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
-
 
511
{
-
 
512
	uint32_t val;
-
 
513
	uint32_t mask;
Line 433... Line 514...
433
 
514
 
-
 
515
	mask = DC_STATE_EN_UPTO_DC5;
-
 
516
	if (IS_BROXTON(dev_priv))
-
 
517
		mask |= DC_STATE_EN_DC9;
-
 
518
	else
-
 
519
		mask |= DC_STATE_EN_UPTO_DC6;
-
 
520
 
-
 
521
	WARN_ON_ONCE(state & ~mask);
-
 
522
 
Line 434... Line 523...
434
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
523
	if (i915.enable_dc == 0)
-
 
524
		state = DC_STATE_DISABLE;
-
 
525
	else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
-
 
526
		state = DC_STATE_EN_UPTO_DC5;
-
 
527
 
-
 
528
	if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
-
 
529
		gen9_set_dc_state_debugmask_memory_up(dev_priv);
-
 
530
 
-
 
531
	val = I915_READ(DC_STATE_EN);
435
{
532
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
-
 
533
		      val & mask, state);
-
 
534
 
436
	uint32_t val;
535
	/* Check if DMC is ignoring our DC state requests */
-
 
536
	if ((val & mask) != dev_priv->csr.dc_state)
437
 
537
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
438
	assert_can_disable_dc9(dev_priv);
538
			  dev_priv->csr.dc_state, val & mask);
Line 439... Line -...
439
 
-
 
440
	DRM_DEBUG_KMS("Disabling DC9\n");
539
 
441
 
540
	val &= ~mask;
442
	val = I915_READ(DC_STATE_EN);
541
	val |= state;
Line 443... Line -...
443
	val &= ~DC_STATE_EN_DC9;
-
 
444
	I915_WRITE(DC_STATE_EN, val);
542
 
445
	POSTING_READ(DC_STATE_EN);
-
 
446
}
-
 
447
 
-
 
-
 
543
	gen9_write_dc_state(dev_priv, val);
448
static void gen9_set_dc_state_debugmask_memory_up(
544
 
449
			struct drm_i915_private *dev_priv)
545
	dev_priv->csr.dc_state = val & mask;
-
 
546
}
-
 
547
 
-
 
548
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
-
 
549
{
-
 
550
	assert_can_enable_dc9(dev_priv);
-
 
551
 
-
 
552
	DRM_DEBUG_KMS("Enabling DC9\n");
-
 
553
 
-
 
554
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
-
 
555
}
-
 
556
 
-
 
557
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
-
 
558
{
-
 
559
	assert_can_disable_dc9(dev_priv);
-
 
560
 
-
 
561
	DRM_DEBUG_KMS("Disabling DC9\n");
450
{
562
 
Line 451... Line 563...
451
	uint32_t val;
563
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
452
 
564
}
453
	/* The below bit doesn't need to be cleared ever afterwards */
565
 
Line 469... Line 581...
469
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
581
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
470
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
582
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
Line 471... Line 583...
471
 
583
 
472
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
584
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
473
		  "DC5 already programmed to be enabled.\n");
585
		  "DC5 already programmed to be enabled.\n");
474
	WARN_ONCE(dev_priv->pm.suspended,
-
 
Line 475... Line 586...
475
		  "DC5 cannot be enabled, if platform is runtime-suspended.\n");
586
	assert_rpm_wakelock_held(dev_priv);
476
 
587
 
Line 477... Line 588...
477
	assert_csr_loaded(dev_priv);
588
	assert_csr_loaded(dev_priv);
478
}
589
}
479
 
-
 
480
static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
-
 
481
{
590
 
482
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
591
static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
483
					SKL_DISP_PW_2);
592
{
484
	/*
593
	/*
485
	 * During initialization, the firmware may not be loaded yet.
594
	 * During initialization, the firmware may not be loaded yet.
486
	 * We still want to make sure that the DC enabling flag is cleared.
595
	 * We still want to make sure that the DC enabling flag is cleared.
Line 487... Line -...
487
	 */
-
 
488
	if (dev_priv->power_domains.initializing)
596
	 */
489
		return;
-
 
490
 
597
	if (dev_priv->power_domains.initializing)
Line 491... Line 598...
491
	WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
598
		return;
492
	WARN_ONCE(dev_priv->pm.suspended,
599
 
493
		"Disabling of DC5 while platform is runtime-suspended should never happen.\n");
-
 
494
}
-
 
495
 
600
	assert_rpm_wakelock_held(dev_priv);
Line 496... Line 601...
496
static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
601
}
Line 497... Line 602...
497
{
602
 
498
	uint32_t val;
-
 
499
 
-
 
500
	assert_can_enable_dc5(dev_priv);
-
 
501
 
-
 
502
	DRM_DEBUG_KMS("Enabling DC5\n");
-
 
503
 
-
 
504
	gen9_set_dc_state_debugmask_memory_up(dev_priv);
-
 
505
 
-
 
506
	val = I915_READ(DC_STATE_EN);
-
 
507
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
-
 
508
	val |= DC_STATE_EN_UPTO_DC5;
-
 
509
	I915_WRITE(DC_STATE_EN, val);
-
 
510
	POSTING_READ(DC_STATE_EN);
-
 
511
}
-
 
512
 
-
 
513
static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
-
 
514
{
-
 
515
	uint32_t val;
-
 
516
 
-
 
517
	assert_can_disable_dc5(dev_priv);
-
 
518
 
603
static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
Line 519... Line 604...
519
	DRM_DEBUG_KMS("Disabling DC5\n");
604
{
520
 
605
	assert_can_enable_dc5(dev_priv);
521
	val = I915_READ(DC_STATE_EN);
606
 
Line 545... Line 630...
545
	 * We still want to make sure that the DC enabling flag is cleared.
630
	 * We still want to make sure that the DC enabling flag is cleared.
546
	 */
631
	 */
547
	if (dev_priv->power_domains.initializing)
632
	if (dev_priv->power_domains.initializing)
548
		return;
633
		return;
Line 549... Line -...
549
 
-
 
550
	assert_csr_loaded(dev_priv);
634
 
551
	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
635
	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
552
		  "DC6 already programmed to be disabled.\n");
636
		  "DC6 already programmed to be disabled.\n");
Line 553... Line 637...
553
}
637
}
554
 
638
 
555
static void skl_enable_dc6(struct drm_i915_private *dev_priv)
639
static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
Line -... Line 640...
-
 
640
{
-
 
641
	assert_can_disable_dc5(dev_priv);
-
 
642
 
-
 
643
	if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
-
 
644
		assert_can_disable_dc6(dev_priv);
-
 
645
 
-
 
646
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
 
647
}
556
{
648
 
Line 557... Line 649...
557
	uint32_t val;
649
void skl_enable_dc6(struct drm_i915_private *dev_priv)
Line 558... Line 650...
558
 
650
{
Line 559... Line -...
559
	assert_can_enable_dc6(dev_priv);
-
 
560
 
-
 
561
	DRM_DEBUG_KMS("Enabling DC6\n");
-
 
562
 
-
 
563
	gen9_set_dc_state_debugmask_memory_up(dev_priv);
-
 
564
 
651
	assert_can_enable_dc6(dev_priv);
Line 565... Line 652...
565
	val = I915_READ(DC_STATE_EN);
652
 
566
	val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
653
	DRM_DEBUG_KMS("Enabling DC6\n");
567
	val |= DC_STATE_EN_UPTO_DC6;
-
 
568
	I915_WRITE(DC_STATE_EN, val);
-
 
569
	POSTING_READ(DC_STATE_EN);
654
 
Line 570... Line 655...
570
}
655
	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
Line 571... Line -...
571
 
-
 
572
static void skl_disable_dc6(struct drm_i915_private *dev_priv)
-
 
573
{
656
 
574
	uint32_t val;
-
 
575
 
657
}
Line 576... Line 658...
576
	assert_can_disable_dc6(dev_priv);
658
 
577
 
659
void skl_disable_dc6(struct drm_i915_private *dev_priv)
578
	DRM_DEBUG_KMS("Disabling DC6\n");
660
{
Line 628... Line 710...
628
		if (!enable_requested) {
710
		if (!enable_requested) {
629
			WARN((tmp & state_mask) &&
711
			WARN((tmp & state_mask) &&
630
				!I915_READ(HSW_PWR_WELL_BIOS),
712
				!I915_READ(HSW_PWR_WELL_BIOS),
631
				"Invalid for power well status to be enabled, unless done by the BIOS, \
713
				"Invalid for power well status to be enabled, unless done by the BIOS, \
632
				when request is to disable!\n");
714
				when request is to disable!\n");
633
			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
-
 
634
				power_well->data == SKL_DISP_PW_2) {
715
			if (power_well->data == SKL_DISP_PW_2) {
635
				if (SKL_ENABLE_DC6(dev)) {
-
 
636
					skl_disable_dc6(dev_priv);
-
 
637
					/*
716
					/*
638
					 * DDI buffer programming unnecessary during driver-load/resume
717
				 * DDI buffer programming unnecessary during
-
 
718
				 * driver-load/resume as it's already done
639
					 * as it's already done during modeset initialization then.
719
				 * during modeset initialization then. It's
640
					 * It's also invalid here as encoder list is still uninitialized.
720
				 * also invalid here as encoder list is still
-
 
721
				 * uninitialized.
641
					 */
722
					 */
642
					if (!dev_priv->power_domains.initializing)
723
					if (!dev_priv->power_domains.initializing)
643
						intel_prepare_ddi(dev);
724
						intel_prepare_ddi(dev);
644
				} else {
-
 
645
					gen9_disable_dc5(dev_priv);
-
 
646
				}
-
 
647
			}
725
			}
648
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
726
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
649
		}
727
		}
Line 650... Line 728...
650
 
728
 
Line 656... Line 734...
656
					power_well->name);
734
					power_well->name);
657
			check_fuse_status = true;
735
			check_fuse_status = true;
658
		}
736
		}
659
	} else {
737
	} else {
660
		if (enable_requested) {
738
		if (enable_requested) {
661
			if (IS_SKYLAKE(dev) &&
-
 
662
				(power_well->data == SKL_DISP_PW_1) &&
-
 
663
				(intel_csr_load_status_get(dev_priv) == FW_LOADED))
-
 
664
				DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
-
 
665
			else {
-
 
666
				I915_WRITE(HSW_PWR_WELL_DRIVER,	tmp & ~req_mask);
739
				I915_WRITE(HSW_PWR_WELL_DRIVER,	tmp & ~req_mask);
667
				POSTING_READ(HSW_PWR_WELL_DRIVER);
740
				POSTING_READ(HSW_PWR_WELL_DRIVER);
668
				DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
741
				DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
669
			}
742
			}
670
 
-
 
671
			if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
-
 
672
				power_well->data == SKL_DISP_PW_2) {
-
 
673
				enum csr_state state;
-
 
674
				/* TODO: wait for a completion event or
-
 
675
				 * similar here instead of busy
-
 
676
				 * waiting using wait_for function.
-
 
677
				 */
-
 
678
				wait_for((state = intel_csr_load_status_get(dev_priv)) !=
-
 
679
						FW_UNINITIALIZED, 1000);
-
 
680
				if (state != FW_LOADED)
-
 
681
					DRM_DEBUG("CSR firmware not ready (%d)\n",
-
 
682
							state);
-
 
683
				else
-
 
684
					if (SKL_ENABLE_DC6(dev))
-
 
685
						skl_enable_dc6(dev_priv);
-
 
686
					else
-
 
687
						gen9_enable_dc5(dev_priv);
-
 
688
			}
-
 
689
		}
-
 
690
	}
743
		}
Line 691... Line 744...
691
 
744
 
692
	if (check_fuse_status) {
745
	if (check_fuse_status) {
693
		if (power_well->data == SKL_DISP_PW_1) {
746
		if (power_well->data == SKL_DISP_PW_1) {
Line 758... Line 811...
758
				struct i915_power_well *power_well)
811
				struct i915_power_well *power_well)
759
{
812
{
760
	skl_set_power_well(dev_priv, power_well, false);
813
	skl_set_power_well(dev_priv, power_well, false);
761
}
814
}
Line -... Line 815...
-
 
815
 
-
 
816
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
-
 
817
					   struct i915_power_well *power_well)
-
 
818
{
-
 
819
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
-
 
820
}
-
 
821
 
-
 
822
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
-
 
823
					  struct i915_power_well *power_well)
-
 
824
{
-
 
825
	gen9_disable_dc5_dc6(dev_priv);
-
 
826
}
-
 
827
 
-
 
828
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
-
 
829
					   struct i915_power_well *power_well)
-
 
830
{
-
 
831
	if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
-
 
832
		skl_enable_dc6(dev_priv);
-
 
833
	else
-
 
834
		gen9_enable_dc5(dev_priv);
-
 
835
}
-
 
836
 
-
 
837
static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
-
 
838
					   struct i915_power_well *power_well)
-
 
839
{
-
 
840
	if (power_well->count > 0) {
-
 
841
		gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
 
842
	} else {
-
 
843
		if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
-
 
844
		    i915.enable_dc != 1)
-
 
845
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
-
 
846
		else
-
 
847
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
-
 
848
	}
-
 
849
}
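The DC-off well added here inverts the usual sense of a power well: enabling it disables DC5/DC6 so display state is preserved, and disabling it lets the DMC firmware drop back into DC5 or DC6 depending on the platform and the i915.enable_dc module parameter. Since POWER_DOMAIN_MODESET sits in the DC-off domain masks, callers can pin DC states off around a critical section with the ordinary reference-counting API; a hedged usage sketch, not code from either revision:

	/* Sketch only: while this reference is held the DC-off well stays
	 * enabled, so DC5/DC6 entry is blocked. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
	/* ... program display state that must not be lost to a DC transition ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);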
762
 
850
 
763
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
851
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
764
					   struct i915_power_well *power_well)
852
					   struct i915_power_well *power_well)
765
{
853
{
Line 972... Line 1060...
972
 
1060
 
973
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1061
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
974
						 int power_well_id)
1062
						 int power_well_id)
975
{
1063
{
976
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
 
977
	struct i915_power_well *power_well;
1064
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
Line -... Line 1065...
-
 
1065
	int i;
-
 
1066
 
-
 
1067
	for (i = 0; i < power_domains->power_well_count; i++) {
978
	int i;
1068
	struct i915_power_well *power_well;
979
 
1069
 
980
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1070
		power_well = &power_domains->power_wells[i];
981
		if (power_well->data == power_well_id)
1071
		if (power_well->data == power_well_id)
Line 982... Line 1072...
982
			return power_well;
1072
			return power_well;
Line 1395... Line 1485...
1395
	vlv_display_power_well_deinit(dev_priv);
1485
	vlv_display_power_well_deinit(dev_priv);
Line 1396... Line 1486...
1396
 
1486
 
1397
	chv_set_pipe_power_well(dev_priv, power_well, false);
1487
	chv_set_pipe_power_well(dev_priv, power_well, false);
Line -... Line 1488...
-
 
1488
}
-
 
1489
 
-
 
1490
static void
-
 
1491
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
-
 
1492
				 enum intel_display_power_domain domain)
-
 
1493
{
-
 
1494
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
 
1495
	struct i915_power_well *power_well;
-
 
1496
	int i;
-
 
1497
 
-
 
1498
	for_each_power_well(i, power_well, BIT(domain), power_domains) {
-
 
1499
		if (!power_well->count++)
-
 
1500
			intel_power_well_enable(dev_priv, power_well);
-
 
1501
	}
-
 
1502
 
-
 
1503
	power_domains->domain_use_count[domain]++;
1398
}
1504
}
1399
 
1505
 
1400
/**
1506
/**
1401
 * intel_display_power_get - grab a power domain reference
1507
 * intel_display_power_get - grab a power domain reference
1402
 * @dev_priv: i915 device instance
1508
 * @dev_priv: i915 device instance
Line 1410... Line 1516...
1410
 * call to intel_display_power_put() to release the reference again.
1516
 * call to intel_display_power_put() to release the reference again.
1411
 */
1517
 */
1412
void intel_display_power_get(struct drm_i915_private *dev_priv,
1518
void intel_display_power_get(struct drm_i915_private *dev_priv,
1413
			     enum intel_display_power_domain domain)
1519
			     enum intel_display_power_domain domain)
1414
{
1520
{
1415
	struct i915_power_domains *power_domains;
1521
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1416
	struct i915_power_well *power_well;
-
 
1417
	int i;
-
 
Line 1418... Line 1522...
1418
 
1522
 
Line 1419... Line -...
1419
	intel_runtime_pm_get(dev_priv);
-
 
1420
 
-
 
1421
	power_domains = &dev_priv->power_domains;
1523
	intel_runtime_pm_get(dev_priv);
Line 1422... Line 1524...
1422
 
1524
 
-
 
1525
	mutex_lock(&power_domains->lock);
1423
	mutex_lock(&power_domains->lock);
1526
 
1424
 
-
 
1425
	for_each_power_well(i, power_well, BIT(domain), power_domains) {
1527
	__intel_display_power_get_domain(dev_priv, domain);
Line -... Line 1528...
-
 
1528
 
-
 
1529
	mutex_unlock(&power_domains->lock);
-
 
1530
}
-
 
1531
 
-
 
1532
/**
-
 
1533
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
-
 
1534
 * @dev_priv: i915 device instance
-
 
1535
 * @domain: power domain to reference
-
 
1536
 *
-
 
1537
 * This function grabs a power domain reference for @domain and ensures that the
-
 
1538
 * power domain and all its parents are powered up. Therefore users should only
-
 
1539
 * grab a reference to the innermost power domain they need.
-
 
1540
 *
-
 
1541
 * Any power domain reference obtained by this function must have a symmetric
-
 
1542
 * call to intel_display_power_put() to release the reference again.
1426
		if (!power_well->count++)
1543
 */
-
 
1544
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
-
 
1545
					enum intel_display_power_domain domain)
-
 
1546
{
-
 
1547
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
 
1548
	bool is_enabled;
-
 
1549
 
-
 
1550
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
-
 
1551
		return false;
-
 
1552
 
-
 
1553
	mutex_lock(&power_domains->lock);
-
 
1554
 
-
 
1555
	if (__intel_display_power_is_enabled(dev_priv, domain)) {
-
 
1556
		__intel_display_power_get_domain(dev_priv, domain);
Line 1427... Line 1557...
1427
			intel_power_well_enable(dev_priv, power_well);
1557
		is_enabled = true;
-
 
1558
	} else {
-
 
1559
		is_enabled = false;
-
 
1560
	}
-
 
1561
 
-
 
1562
	mutex_unlock(&power_domains->lock);
1428
	}
1563
 
Line 1429... Line 1564...
1429
 
1564
	if (!is_enabled)
1430
	power_domains->domain_use_count[domain]++;
1565
		intel_runtime_pm_put(dev_priv);
1431
 
1566
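The kernel-doc above spells out the usage contract: every successful get must be balanced by a call to intel_display_power_put(), and the new intel_display_power_get_if_enabled() only takes a reference (plus a runtime-PM wakeref) when the domain is already powered, returning false otherwise. A short hedged sketch of the intended calling pattern, illustrative rather than code from either revision:

	/* Unconditional reference: powers the domain and its parents up. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUX_B);
	/* ... perform the AUX transfer ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_B);

	/* Conditional reference: only proceed if the domain is already up. */
	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A)) {
		/* ... read pipe registers safely ... */
		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
	}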
 
Line 1450... Line 1585...
1450
 
1585
 
Line 1451... Line 1586...
1451
	power_domains = &dev_priv->power_domains;
1586
	power_domains = &dev_priv->power_domains;
Line 1452... Line 1587...
1452
 
1587
 
-
 
1588
	mutex_lock(&power_domains->lock);
-
 
1589
 
1453
	mutex_lock(&power_domains->lock);
1590
	WARN(!power_domains->domain_use_count[domain],
Line 1454... Line 1591...
1454
 
1591
	     "Use count on domain %s is already zero\n",
1455
	WARN_ON(!power_domains->domain_use_count[domain]);
1592
	     intel_display_power_domain_str(domain));
-
 
1593
	power_domains->domain_use_count[domain]--;
-
 
1594
 
Line 1456... Line 1595...
1456
	power_domains->domain_use_count[domain]--;
1595
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
1457
 
1596
		WARN(!power_well->count,
1458
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
1597
		     "Use count on power well %s is already zero",
Line 1459... Line 1598...
1459
		WARN_ON(!power_well->count);
1598
		     power_well->name);
Line 1468... Line 1607...
1468
}
1607
}
Line 1469... Line 1608...
1469
 
1608
 
1470
#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
1609
#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
1471
	BIT(POWER_DOMAIN_PIPE_A) |			\
1610
	BIT(POWER_DOMAIN_PIPE_A) |			\
1472
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
-
 
1473
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
1611
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
1474
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
-
 
1475
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
1612
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
1476
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
-
 
1477
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
1613
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
1478
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
-
 
1479
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
1614
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
1480
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
1615
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
1481
	BIT(POWER_DOMAIN_PORT_CRT) |			\
1616
	BIT(POWER_DOMAIN_PORT_CRT) |			\
1482
	BIT(POWER_DOMAIN_PLLS) |			\
1617
	BIT(POWER_DOMAIN_PLLS) |			\
1483
	BIT(POWER_DOMAIN_AUX_A) |			\
1618
	BIT(POWER_DOMAIN_AUX_A) |			\
1484
	BIT(POWER_DOMAIN_AUX_B) |			\
1619
	BIT(POWER_DOMAIN_AUX_B) |			\
Line 1499... Line 1634...
1499
 
1634
 
1500
#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
1635
#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
Line 1501... Line 1636...
1501
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK
1636
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK
1502
 
-
 
1503
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
1637
 
1504
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
-
 
1505
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
1638
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
1506
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
1639
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1507
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
1640
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1508
	BIT(POWER_DOMAIN_PORT_CRT) |		\
1641
	BIT(POWER_DOMAIN_PORT_CRT) |		\
1509
	BIT(POWER_DOMAIN_AUX_B) |		\
1642
	BIT(POWER_DOMAIN_AUX_B) |		\
Line 1510... Line 1643...
1510
	BIT(POWER_DOMAIN_AUX_C) |		\
1643
	BIT(POWER_DOMAIN_AUX_C) |		\
1511
	BIT(POWER_DOMAIN_INIT))
-
 
1512
 
1644
	BIT(POWER_DOMAIN_INIT))
1513
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
1645
 
1514
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
1646
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
Line 1515... Line 1647...
1515
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
1647
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1516
	BIT(POWER_DOMAIN_AUX_B) |		\
1648
	BIT(POWER_DOMAIN_AUX_B) |		\
1517
	BIT(POWER_DOMAIN_INIT))
1649
	BIT(POWER_DOMAIN_INIT))
1518
 
1650
 
Line 1519... Line 1651...
1519
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
1651
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
1520
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
-
 
1521
	BIT(POWER_DOMAIN_AUX_B) |		\
1652
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1522
	BIT(POWER_DOMAIN_INIT))
1653
	BIT(POWER_DOMAIN_AUX_B) |		\
1523
 
1654
	BIT(POWER_DOMAIN_INIT))
Line 1524... Line 1655...
1524
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
1655
 
1525
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
1656
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
1526
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
1657
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1527
	BIT(POWER_DOMAIN_AUX_C) |		\
1658
	BIT(POWER_DOMAIN_AUX_C) |		\
Line 1528... Line 1659...
1528
	BIT(POWER_DOMAIN_INIT))
1659
	BIT(POWER_DOMAIN_INIT))
1529
 
-
 
1530
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
1660
 
1531
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
-
 
1532
	BIT(POWER_DOMAIN_AUX_C) |		\
1661
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
1533
	BIT(POWER_DOMAIN_INIT))
1662
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
1534
 
1663
	BIT(POWER_DOMAIN_AUX_C) |		\
1535
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
1664
	BIT(POWER_DOMAIN_INIT))
Line 1536... Line 1665...
1536
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
1665
 
1537
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
-
 
1538
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
1666
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
1539
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
1667
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
1540
	BIT(POWER_DOMAIN_AUX_B) |		\
1668
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
Line 1541... Line 1669...
1541
	BIT(POWER_DOMAIN_AUX_C) |		\
1669
	BIT(POWER_DOMAIN_AUX_B) |		\
1542
	BIT(POWER_DOMAIN_INIT))
1670
	BIT(POWER_DOMAIN_AUX_C) |		\
Line 1589... Line 1717...
1589
	.enable = skl_power_well_enable,
1717
	.enable = skl_power_well_enable,
1590
	.disable = skl_power_well_disable,
1718
	.disable = skl_power_well_disable,
1591
	.is_enabled = skl_power_well_enabled,
1719
	.is_enabled = skl_power_well_enabled,
1592
};
1720
};
Line -... Line 1721...
-
 
1721
 
-
 
1722
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
-
 
1723
	.sync_hw = gen9_dc_off_power_well_sync_hw,
-
 
1724
	.enable = gen9_dc_off_power_well_enable,
-
 
1725
	.disable = gen9_dc_off_power_well_disable,
-
 
1726
	.is_enabled = gen9_dc_off_power_well_enabled,
-
 
1727
};
1593
 
1728
 
1594
static struct i915_power_well hsw_power_wells[] = {
1729
static struct i915_power_well hsw_power_wells[] = {
1595
	{
1730
	{
1596
		.name = "always-on",
1731
		.name = "always-on",
1597
		.always_on = 1,
1732
		.always_on = 1,
Line 1644... Line 1779...
1644
	{
1779
	{
1645
		.name = "always-on",
1780
		.name = "always-on",
1646
		.always_on = 1,
1781
		.always_on = 1,
1647
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
1782
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
1648
		.ops = &i9xx_always_on_power_well_ops,
1783
		.ops = &i9xx_always_on_power_well_ops,
-
 
1784
		.data = PUNIT_POWER_WELL_ALWAYS_ON,
1649
	},
1785
	},
1650
	{
1786
	{
1651
		.name = "display",
1787
		.name = "display",
1652
		.domains = VLV_DISPLAY_POWER_DOMAINS,
1788
		.domains = VLV_DISPLAY_POWER_DOMAINS,
1653
		.data = PUNIT_POWER_WELL_DISP2D,
1789
		.data = PUNIT_POWER_WELL_DISP2D,
Line 1745... Line 1881...
1745
	{
1881
	{
1746
		.name = "always-on",
1882
		.name = "always-on",
1747
		.always_on = 1,
1883
		.always_on = 1,
1748
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
1884
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
1749
		.ops = &i9xx_always_on_power_well_ops,
1885
		.ops = &i9xx_always_on_power_well_ops,
-
 
1886
		.data = SKL_DISP_PW_ALWAYS_ON,
1750
	},
1887
	},
1751
	{
1888
	{
1752
		.name = "power well 1",
1889
		.name = "power well 1",
1753
		.domains = SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS,
1890
		/* Handled by the DMC firmware */
-
 
1891
		.domains = 0,
1754
		.ops = &skl_power_well_ops,
1892
		.ops = &skl_power_well_ops,
1755
		.data = SKL_DISP_PW_1,
1893
		.data = SKL_DISP_PW_1,
1756
	},
1894
	},
1757
	{
1895
	{
1758
		.name = "MISC IO power well",
1896
		.name = "MISC IO power well",
1759
		.domains = SKL_DISPLAY_MISC_IO_POWER_DOMAINS,
1897
		/* Handled by the DMC firmware */
-
 
1898
		.domains = 0,
1760
		.ops = &skl_power_well_ops,
1899
		.ops = &skl_power_well_ops,
1761
		.data = SKL_DISP_PW_MISC_IO,
1900
		.data = SKL_DISP_PW_MISC_IO,
1762
	},
1901
	},
1763
	{
1902
	{
-
 
1903
		.name = "DC off",
-
 
1904
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
-
 
1905
		.ops = &gen9_dc_off_power_well_ops,
-
 
1906
		.data = SKL_DISP_PW_DC_OFF,
-
 
1907
	},
-
 
1908
	{
1764
		.name = "power well 2",
1909
		.name = "power well 2",
1765
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1910
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1766
		.ops = &skl_power_well_ops,
1911
		.ops = &skl_power_well_ops,
1767
		.data = SKL_DISP_PW_2,
1912
		.data = SKL_DISP_PW_2,
1768
	},
1913
	},
Line 1790... Line 1935...
1790
		.ops = &skl_power_well_ops,
1935
		.ops = &skl_power_well_ops,
1791
		.data = SKL_DISP_PW_DDI_D,
1936
		.data = SKL_DISP_PW_DDI_D,
1792
	},
1937
	},
1793
};
1938
};
Line -... Line 1939...
-
 
1939
 
-
 
1940
void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
-
 
1941
{
-
 
1942
	struct i915_power_well *well;
-
 
1943
 
-
 
1944
	if (!IS_SKYLAKE(dev_priv))
-
 
1945
		return;
-
 
1946
 
-
 
1947
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-
 
1948
	intel_power_well_enable(dev_priv, well);
-
 
1949
 
-
 
1950
	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
-
 
1951
	intel_power_well_enable(dev_priv, well);
-
 
1952
}
-
 
1953
 
-
 
1954
void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
-
 
1955
{
-
 
1956
	struct i915_power_well *well;
-
 
1957
 
-
 
1958
	if (!IS_SKYLAKE(dev_priv))
-
 
1959
		return;
-
 
1960
 
-
 
1961
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-
 
1962
	intel_power_well_disable(dev_priv, well);
-
 
1963
 
-
 
1964
	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
-
 
1965
	intel_power_well_disable(dev_priv, well);
-
 
1966
}
1794
 
1967
 
1795
static struct i915_power_well bxt_power_wells[] = {
1968
static struct i915_power_well bxt_power_wells[] = {
1796
	{
1969
	{
1797
		.name = "always-on",
1970
		.name = "always-on",
1798
		.always_on = 1,
1971
		.always_on = 1,
Line 1804... Line 1977...
1804
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
1977
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
1805
		.ops = &skl_power_well_ops,
1978
		.ops = &skl_power_well_ops,
1806
		.data = SKL_DISP_PW_1,
1979
		.data = SKL_DISP_PW_1,
1807
	},
1980
	},
1808
	{
1981
	{
-
 
1982
		.name = "DC off",
-
 
1983
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
-
 
1984
		.ops = &gen9_dc_off_power_well_ops,
-
 
1985
		.data = SKL_DISP_PW_DC_OFF,
-
 
1986
	},
-
 
1987
	{
1809
		.name = "power well 2",
1988
		.name = "power well 2",
1810
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1989
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
1811
		.ops = &skl_power_well_ops,
1990
		.ops = &skl_power_well_ops,
1812
		.data = SKL_DISP_PW_2,
1991
		.data = SKL_DISP_PW_2,
1813
	}
1992
	},
1814
};
1993
};
Line 1815... Line 1994...
1815
 
1994
 
1816
static int
1995
static int
1817
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
1996
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
1818
				   int disable_power_well)
1997
				   int disable_power_well)
1819
{
1998
{
1820
	if (disable_power_well >= 0)
1999
	if (disable_power_well >= 0)
Line 1821... Line 2000...
1821
		return !!disable_power_well;
2000
		return !!disable_power_well;
1822
 
2001
 
1823
	if (IS_SKYLAKE(dev_priv)) {
2002
	if (IS_BROXTON(dev_priv)) {
1824
		DRM_DEBUG_KMS("Disabling display power well support\n");
2003
		DRM_DEBUG_KMS("Disabling display power well support\n");
Line 1825... Line 2004...
1825
		return 0;
2004
		return 0;
Line 1857... Line 2036...
1857
	 */
2036
	 */
1858
	if (IS_HASWELL(dev_priv->dev)) {
2037
	if (IS_HASWELL(dev_priv->dev)) {
1859
		set_power_wells(power_domains, hsw_power_wells);
2038
		set_power_wells(power_domains, hsw_power_wells);
1860
	} else if (IS_BROADWELL(dev_priv->dev)) {
2039
	} else if (IS_BROADWELL(dev_priv->dev)) {
1861
		set_power_wells(power_domains, bdw_power_wells);
2040
		set_power_wells(power_domains, bdw_power_wells);
1862
	} else if (IS_SKYLAKE(dev_priv->dev)) {
2041
	} else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
1863
		set_power_wells(power_domains, skl_power_wells);
2042
		set_power_wells(power_domains, skl_power_wells);
1864
	} else if (IS_BROXTON(dev_priv->dev)) {
2043
	} else if (IS_BROXTON(dev_priv->dev)) {
1865
		set_power_wells(power_domains, bxt_power_wells);
2044
		set_power_wells(power_domains, bxt_power_wells);
1866
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
2045
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
1867
		set_power_wells(power_domains, chv_power_wells);
2046
		set_power_wells(power_domains, chv_power_wells);
Line 1872... Line 2051...
1872
	}
2051
	}
Line 1873... Line 2052...
1873
 
2052
 
1874
	return 0;
2053
	return 0;
Line 1875... Line -...
1875
}
-
 
1876
 
-
 
1877
static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
-
 
1878
{
-
 
1879
	struct drm_device *dev = dev_priv->dev;
-
 
1880
	struct device *device = &dev->pdev->dev;
-
 
1881
 
-
 
1882
	if (!HAS_RUNTIME_PM(dev))
-
 
1883
		return;
-
 
1884
 
-
 
1885
	if (!intel_enable_rc6(dev))
-
 
1886
		return;
-
 
1887
 
-
 
1888
	/* Make sure we're not suspended first. */
-
 
1889
	pm_runtime_get_sync(device);
-
 
1890
}
2054
}
1891
 
2055
 
1892
/**
2056
/**
1893
 * intel_power_domains_fini - finalizes the power domain structures
2057
 * intel_power_domains_fini - finalizes the power domain structures
1894
 * @dev_priv: i915 device instance
2058
 * @dev_priv: i915 device instance
1895
 *
2059
 *
1896
 * Finalizes the power domain structures for @dev_priv depending upon the
2060
 * Finalizes the power domain structures for @dev_priv depending upon the
1897
 * supported platform. This function also disables runtime pm and ensures that
2061
 * supported platform. This function also disables runtime pm and ensures that
1898
 * the device stays powered up so that the driver can be reloaded.
2062
 * the device stays powered up so that the driver can be reloaded.
1899
 */
2063
 */
1900
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2064
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
Line -... Line 2065...
-
 
2065
{
1901
{
2066
	struct device *device = &dev_priv->dev->pdev->dev;
1902
	intel_runtime_pm_disable(dev_priv);
2067
 
1903
 
2068
	/*
-
 
2069
	 * The i915.ko module is still not prepared to be loaded when
-
 
2070
	 * the power well is not enabled, so just enable it in case
-
 
2071
	 * we're going to unload/reload.
-
 
2072
	 * The following also reacquires the RPM reference the core passed
-
 
2073
	 * to the driver during loading, which is dropped in
1904
	/* The i915.ko module is still not prepared to be loaded when
2074
	 * intel_runtime_pm_enable(). We have to hand back the control of the
-
 
2075
	 * device to the core with this reference held.
-
 
2076
	 */
-
 
2077
	intel_display_set_init_power(dev_priv, true);
-
 
2078
 
-
 
2079
	/* Remove the refcount we took to keep power well support disabled. */
-
 
2080
	if (!i915.disable_power_well)
-
 
2081
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
 
2082
 
-
 
2083
	/*
-
 
2084
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
-
 
2085
	 * the platform doesn't support runtime PM.
1905
	 * the power well is not enabled, so just enable it in case
2086
	 */
Line 1906... Line 2087...
1906
	 * we're going to unload/reload. */
2087
	if (!HAS_RUNTIME_PM(dev_priv))
1907
	intel_display_set_init_power(dev_priv, true);
2088
		pm_runtime_put(device);
1908
}
2089
}
1909
 
2090
 
1910
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
2091
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
Line 1920... Line 2101...
1920
								     power_well);
2101
								     power_well);
1921
	}
2102
	}
1922
	mutex_unlock(&power_domains->lock);
2103
	mutex_unlock(&power_domains->lock);
1923
}
2104
}
Line -... Line 2105...
-
 
2105
 
-
 
2106
static void skl_display_core_init(struct drm_i915_private *dev_priv,
-
 
2107
				  bool resume)
-
 
2108
{
-
 
2109
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
 
2110
	uint32_t val;
-
 
2111
 
-
 
2112
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
 
2113
 
-
 
2114
	/* enable PCH reset handshake */
-
 
2115
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
-
 
2116
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
-
 
2117
 
-
 
2118
	/* enable PG1 and Misc I/O */
-
 
2119
	mutex_lock(&power_domains->lock);
-
 
2120
	skl_pw1_misc_io_init(dev_priv);
-
 
2121
	mutex_unlock(&power_domains->lock);
-
 
2122
 
-
 
2123
	if (!resume)
-
 
2124
		return;
-
 
2125
 
-
 
2126
	skl_init_cdclk(dev_priv);
-
 
2127
 
-
 
2128
	if (dev_priv->csr.dmc_payload)
-
 
2129
		intel_csr_load_program(dev_priv);
-
 
2130
}
-
 
2131
 
-
 
2132
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
-
 
2133
{
-
 
2134
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
 
2135
 
-
 
2136
	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
 
2137
 
-
 
2138
	skl_uninit_cdclk(dev_priv);
-
 
2139
 
-
 
2140
	/* The spec doesn't call for removing the reset handshake flag */
-
 
2141
	/* disable PG1 and Misc I/O */
-
 
2142
	mutex_lock(&power_domains->lock);
-
 
2143
	skl_pw1_misc_io_fini(dev_priv);
-
 
2144
	mutex_unlock(&power_domains->lock);
-
 
2145
}
1924
 
2146
 
1925
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
2147
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
1926
{
2148
{
1927
	struct i915_power_well *cmn_bc =
2149
	struct i915_power_well *cmn_bc =
1928
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
2150
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
Line 2042... Line 2264...
2042
 * @dev_priv: i915 device instance
2264
 * @dev_priv: i915 device instance
2043
 *
2265
 *
2044
 * This function initializes the hardware power domain state and enables all
2266
 * This function initializes the hardware power domain state and enables all
2045
 * power domains using intel_display_set_init_power().
2267
 * power domains using intel_display_set_init_power().
2046
 */
2268
 */
2047
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
2269
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2048
{
2270
{
2049
	struct drm_device *dev = dev_priv->dev;
2271
	struct drm_device *dev = dev_priv->dev;
2050
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2272
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
Line 2051... Line 2273...
2051
 
2273
 
Line -... Line 2274...
-
 
2274
	power_domains->initializing = true;
-
 
2275
 
2052
	power_domains->initializing = true;
2276
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
2053
 
2277
		skl_display_core_init(dev_priv, resume);
2054
	if (IS_CHERRYVIEW(dev)) {
2278
	} else if (IS_CHERRYVIEW(dev)) {
2055
		mutex_lock(&power_domains->lock);
2279
		mutex_lock(&power_domains->lock);
2056
		chv_phy_control_init(dev_priv);
2280
		chv_phy_control_init(dev_priv);
2057
		mutex_unlock(&power_domains->lock);
2281
		mutex_unlock(&power_domains->lock);
Line 2061... Line 2285...
2061
		mutex_unlock(&power_domains->lock);
2285
		mutex_unlock(&power_domains->lock);
2062
	}
2286
	}
Line 2063... Line 2287...
2063
 
2287
 
2064
	/* For now, we need the power well to be always enabled. */
2288
	/* For now, we need the power well to be always enabled. */
-
 
2289
	intel_display_set_init_power(dev_priv, true);
-
 
2290
	/* Disable power support if the user asked so. */
-
 
2291
	if (!i915.disable_power_well)
2065
	intel_display_set_init_power(dev_priv, true);
2292
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
2066
	intel_power_domains_resume(dev_priv);
2293
	intel_power_domains_sync_hw(dev_priv);
2067
	power_domains->initializing = false;
2294
	power_domains->initializing = false;
Line 2068... Line 2295...
2068
}
2295
}
-
 
2296
 
-
 
2297
/**
-
 
2298
 * intel_power_domains_suspend - suspend power domain state
-
 
2299
 * @dev_priv: i915 device instance
-
 
2300
 *
-
 
2301
 * This function prepares the hardware power domain state before entering
-
 
2302
 * system suspend. It must be paired with intel_power_domains_init_hw().
-
 
2303
 */
-
 
2304
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
-
 
2305
{
-
 
2306
	/*
-
 
2307
	 * Even if power well support was disabled we still want to disable
-
 
2308
	 * power wells while we are system suspended.
-
 
2309
	 */
-
 
2310
	if (!i915.disable_power_well)
-
 
2311
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
 
2312
 
-
 
2313
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-
 
2314
		skl_display_core_uninit(dev_priv);
-
 
2315
}
2069
 
2316
 
2070
/**
2317
/**
2071
 * intel_runtime_pm_get - grab a runtime pm reference
2318
 * intel_runtime_pm_get - grab a runtime pm reference
2072
 * @dev_priv: i915 device instance
2319
 * @dev_priv: i915 device instance
2073
 *
2320
 *
Line 2080... Line 2327...
2080
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2327
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2081
{
2328
{
2082
	struct drm_device *dev = dev_priv->dev;
2329
	struct drm_device *dev = dev_priv->dev;
2083
	struct device *device = &dev->pdev->dev;
2330
	struct device *device = &dev->pdev->dev;
Line 2084... Line -...
2084
 
-
 
2085
	if (!HAS_RUNTIME_PM(dev))
-
 
2086
		return;
-
 
2087
 
2331
 
-
 
2332
	pm_runtime_get_sync(device);
-
 
2333
 
-
 
2334
	atomic_inc(&dev_priv->pm.wakeref_count);
-
 
2335
	assert_rpm_wakelock_held(dev_priv);
-
 
2336
}
-
 
2337
 
-
 
2338
/**
-
 
2339
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
-
 
2340
 * @dev_priv: i915 device instance
-
 
2341
 *
-
 
2342
 * This function grabs a device-level runtime pm reference if the device is
-
 
2343
 * already in use and ensures that it is powered up.
-
 
2344
 *
-
 
2345
 * Any runtime pm reference obtained by this function must have a symmetric
-
 
2346
 * call to intel_runtime_pm_put() to release the reference again.
-
 
2347
 */
-
 
2348
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
-
 
2349
{
-
 
2350
	struct drm_device *dev = dev_priv->dev;
-
 
2351
	struct device *device = &dev->pdev->dev;
-
 
2352
 
2088
	pm_runtime_get_sync(device);
2353
	if (IS_ENABLED(CONFIG_PM)) {
-
 
2354
		int ret = pm_runtime_get_if_in_use(device);
-
 
2355
 
-
 
2356
		/*
-
 
2357
		 * In cases runtime PM is disabled by the RPM core and we get
-
 
2358
		 * an -EINVAL return value we are not supposed to call this
-
 
2359
		 * function, since the power state is undefined. This applies
-
 
2360
		 * atm to the late/early system suspend/resume handlers.
-
 
2361
		 */
-
 
2362
		WARN_ON_ONCE(ret < 0);
-
 
2363
		if (ret <= 0)
-
 
2364
			return false;
-
 
2365
	}
-
 
2366
 
-
 
2367
	atomic_inc(&dev_priv->pm.wakeref_count);
-
 
2368
	assert_rpm_wakelock_held(dev_priv);
-
 
2369
 
2089
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
2370
	return true;
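As with the display power domains, the runtime-PM helpers are strictly reference counted: intel_runtime_pm_get() always resumes the device and must be paired with intel_runtime_pm_put(), while the new intel_runtime_pm_get_if_in_use() only takes a wakeref when the device is already awake and returns false otherwise. A hedged sketch of the calling pattern, illustrative and not taken from the diff:

	/* Opportunistic path: skip the work entirely if the device is
	 * runtime suspended, instead of waking it up. */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return;
	/* ... touch hardware that is only worth poking while awake ... */
	intel_runtime_pm_put(dev_priv);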
Line 2090... Line 2371...
2090
}
2371
}
2091
 
2372
 
2092
/**
2373
/**
Line 2109... Line 2390...
2109
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2390
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2110
{
2391
{
2111
	struct drm_device *dev = dev_priv->dev;
2392
	struct drm_device *dev = dev_priv->dev;
2112
	struct device *device = &dev->pdev->dev;
2393
	struct device *device = &dev->pdev->dev;
Line 2113... Line 2394...
2113
 
2394
 
2114
	if (!HAS_RUNTIME_PM(dev))
-
 
2115
		return;
-
 
2116
 
-
 
2117
	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
2395
	assert_rpm_wakelock_held(dev_priv);
-
 
2396
	pm_runtime_get_noresume(device);
-
 
2397
 
2118
	pm_runtime_get_noresume(device);
2398
	atomic_inc(&dev_priv->pm.wakeref_count);
Line 2119... Line 2399...
2119
}
2399
}
2120
 
2400
 
2121
/**
2401
/**
Line 2129... Line 2409...
2129
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2409
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2130
{
2410
{
2131
	struct drm_device *dev = dev_priv->dev;
2411
	struct drm_device *dev = dev_priv->dev;
2132
	struct device *device = &dev->pdev->dev;
2412
	struct device *device = &dev->pdev->dev;
Line 2133... Line 2413...
2133
 
2413
 
-
 
2414
	assert_rpm_wakelock_held(dev_priv);
2134
	if (!HAS_RUNTIME_PM(dev))
2415
	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
Line 2135... Line 2416...
2135
		return;
2416
		atomic_inc(&dev_priv->pm.atomic_seq);
2136
 
2417
 
2137
	pm_runtime_mark_last_busy(device);
2418
	pm_runtime_mark_last_busy(device);
Line 2151... Line 2432...
2151
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2432
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2152
{
2433
{
2153
	struct drm_device *dev = dev_priv->dev;
2434
	struct drm_device *dev = dev_priv->dev;
2154
	struct device *device = &dev->pdev->dev;
2435
	struct device *device = &dev->pdev->dev;
Line 2155... Line 2436...
2155
 
2436
 
2156
	if (!HAS_RUNTIME_PM(dev))
2437
	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
Line 2157... Line 2438...
2157
		return;
2438
	pm_runtime_mark_last_busy(device);
-
 
2439
 
-
 
2440
	/*
2158
 
2441
	 * Take a permanent reference to disable the RPM functionality and drop
2159
	/*
2442
	 * it only when unloading the driver. Use the low level get/put helpers,
2160
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
2443
	 * so the driver's own RPM reference tracking asserts also work on
2161
	 * requirement.
2444
	 * platforms without RPM support.
2162
	 */
-
 
2163
	if (!intel_enable_rc6(dev)) {
-
 
2164
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
-
 
2165
		return;
-
 
2166
	}
2445
	 */
2167
 
2446
	if (!HAS_RUNTIME_PM(dev)) {
-
 
2447
		pm_runtime_dont_use_autosuspend(device);
2168
	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
2448
		pm_runtime_get_sync(device);
-
 
2449
	} else {
Line -... Line 2450...
-
 
2450
		pm_runtime_use_autosuspend(device);
-
 
2451
	}
-
 
2452
 
-
 
2453
	/*
-
 
2454
	 * The core calls the driver load handler with an RPM reference held.
2169
	pm_runtime_mark_last_busy(device);
2455
	 * We drop that here and will reacquire it during unloading in
2170
	pm_runtime_use_autosuspend(device);
2456
	 * intel_power_domains_fini().