/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
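/*
 * Illustrative usage sketch (the domain is chosen arbitrarily here, not
 * prescribed by this file): a consumer brackets its hardware access with a
 * get/put pair on the innermost domain it needs:
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
 *	... access audio-related display registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
 */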

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		for_each_if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		for_each_if ((power_well)->domains & (domain_mask))
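/*
 * Sketch of how the iterators above are used (domain chosen arbitrarily):
 *
 *	struct i915_power_well *power_well;
 *	int i;
 *
 *	for_each_power_well(i, power_well, BIT(POWER_DOMAIN_VGA), power_domains)
 *		DRM_DEBUG_KMS("%s backs the VGA domain\n", power_well->name);
 *
 * The _rev variant walks the same wells from the last one back to the first,
 * which is the order used below when dropping references.
 */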

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
				    int power_well_id);

const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_PLLS:
		return "PLLS";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
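/*
 * Illustrative call flow (a sketch of how i915 is expected to use the hook
 * above, not a definition of it): driver load and resume first force all
 * domains on, then release the blanket reference once the HW state has been
 * initialized:
 *
 *	intel_display_set_init_power(dev_priv, true);
 *	... bring up display hardware ...
 *	intel_display_set_init_power(dev_priv, false);
 */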

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
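/*
 * For reference (register names from i915_reg.h, not spelled out in the
 * comment above): the four request registers are HSW_PWR_WELL_BIOS,
 * HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_KVMR and HSW_PWR_WELL_DEBUG. This file
 * only drives the _DRIVER one, and clears the _BIOS request when taking over.
 */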
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}

static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	if (power_well->data == SKL_DISP_PW_2)
		gen8_irq_power_well_pre_disable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			hsw_power_well_pre_disable(dev_priv);
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(				\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
	uint32_t val, mask;

	mask = DC_STATE_DEBUG_MASK_MEMORY_UP;

	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_DEBUG_MASK_CORES;

	/* The below bit doesn't need to be cleared ever afterwards */
	val = I915_READ(DC_STATE_DEBUG);
	if ((val & mask) != mask) {
		val |= mask;
		I915_WRITE(DC_STATE_DEBUG, val);
		POSTING_READ(DC_STATE_DEBUG);
	}
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the DC6 state sometimes
	 * doesn't stick and the DMC keeps returning the old value. Make sure
	 * the write really sticks enough times, and also force a rewrite
	 * until we are confident that the state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time we need one retry, avoid spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	WARN_ON_ONCE(state & ~mask);

	if (i915.enable_dc == 0)
		state = DC_STATE_DISABLE;
	else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
		state = DC_STATE_EN_UPTO_DC5;

	val = I915_READ(DC_STATE_EN);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}
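/*
 * Note on the i915.enable_dc clamping above, as implemented by this
 * function (see i915_params.c for the authoritative modparam description):
 * 0 disables DC states entirely, 1 allows at most DC5, and any other value
 * (including the -1 "auto" default) lets DC6 requests through unchanged.
 */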

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
		  "Platform doesn't support DC5.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	assert_rpm_wakelock_held(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
		  "Platform doesn't support DC6.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be disabled.\n");
}

static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc5(dev_priv);

	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	    i915.enable_dc != 0 && i915.enable_dc != 1)
		assert_can_disable_dc6(dev_priv);

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc6(dev_priv);

	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well, bool enable)
{
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (!enable && enable_requested)
		skl_power_well_pre_disable(dev_priv, power_well);

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, unless done by the BIOS, \
				when request is to disable!\n");
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc5_dc6(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
	    i915.enable_dc != 0 && i915.enable_dc != 1)
		skl_enable_dc6(dev_priv);
	else
		gen9_enable_dc5(dev_priv);
}

static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0) {
		gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	} else {
		if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
		    i915.enable_dc != 0 &&
		    i915.enable_dc != 1)
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
		else
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
	}
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv->dev, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	synchronize_irq(dev_priv->dev->irq);

	vlv_power_sequencer_reset(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 tmp;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
		WARN(phy_status != tmp,
		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
		     tmp, phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	uint32_t tmp;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
		DRM_ERROR("Display PHY %d is not powered up\n", phy);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	mutex_unlock(&dev_priv->sb_lock);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++)
			intel_power_well_enable(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain if the domain is
 * already enabled, and ensures it stays enabled until the reference is
 * released. In contrast to intel_display_power_get() it will not power up a
 * disabled domain.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns:
 * True when the reference was grabbed, false if the domain was disabled.
 */
1560
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1561
					enum intel_display_power_domain domain)
1562
{
1563
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
1564
	bool is_enabled;
1565
 
1566
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
1567
		return false;
1568
 
5354 serge 1569
	mutex_lock(&power_domains->lock);
1570
 
6937 serge 1571
	if (__intel_display_power_is_enabled(dev_priv, domain)) {
1572
		__intel_display_power_get_domain(dev_priv, domain);
1573
		is_enabled = true;
1574
	} else {
1575
		is_enabled = false;
5354 serge 1576
	}
1577
 
6937 serge 1578
	mutex_unlock(&power_domains->lock);
5354 serge 1579
 
6937 serge 1580
	if (!is_enabled)
1581
		intel_runtime_pm_put(dev_priv);
1582
 
1583
	return is_enabled;
5354 serge 1584
}
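
/*
 * Illustrative usage sketch (editor's addition, not from the original file):
 * the conditional pattern intel_display_power_get_if_enabled() is meant for,
 * e.g. state readout that must not power up a sleeping domain. The caller
 * and the register access are hypothetical; the entry points are the ones
 * defined in this file.
 *
 *	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A)) {
 *		... pipe A is guaranteed to stay powered: read its registers ...
 *		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *	} else {
 *		... the domain was off: report cached state, don't touch hw ...
 *	}
 */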

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN(!power_well->count,
		     "Use count on power well %s is already zero",
		     power_well->name);

		if (!--power_well->count)
			intel_power_well_disable(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
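
/*
 * Illustrative usage sketch (editor's addition, not from the original file):
 * the basic get/put bracket described in the kernel-doc above. The modeset
 * work in the middle is hypothetical.
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_TRANSCODER_EDP);
 *	... program the eDP transcoder while the domain is held ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER_EDP);
 *
 * References nest: every get must be balanced by exactly one put, and the
 * underlying wells may power down as soon as the last reference is dropped.
 */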

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))
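
/*
 * Editor's note (not from the original file): these masks are matched
 * against BIT(domain) by for_each_power_well(), so one domain reference can
 * bump several wells. E.g. on VLV, grabbing POWER_DOMAIN_AUX_B takes the
 * "display" well (VLV_DISPLAY_POWER_DOMAINS covers every domain), all four
 * dpio-tx wells and "dpio-common", since each of their masks contains
 * BIT(POWER_DOMAIN_AUX_B).
 */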

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
		.data = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};

static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}

static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
		.data = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};

void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *well;

	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
		return;

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);
}

void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *well;

	if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
		return;

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);
}

static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	if (IS_BROXTON(dev_priv)) {
		DRM_DEBUG_KMS("Disabling display power well support\n");
		return 0;
	}

	return 1;
}

#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv->dev)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
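
/*
 * Illustrative sketch (editor's addition, not from the original file):
 * hooking up a new platform follows the pattern of the branches above. The
 * platform check and table below are hypothetical.
 *
 *	static struct i915_power_well foo_power_wells[] = {
 *		{
 *			.name = "always-on",
 *			.always_on = 1,
 *			.domains = POWER_DOMAIN_MASK,
 *			.ops = &i9xx_always_on_power_well_ops,
 *		},
 *	};
 *
 * and, in intel_power_domains_init():
 *
 *	} else if (IS_FOO(dev_priv->dev)) {
 *		set_power_wells(power_domains, foo_power_wells);
 *	}
 */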

/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	struct device *device = &dev_priv->dev->pdev->dev;

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(device);
}

static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);
	skl_pw1_misc_io_init(dev_priv);
	mutex_unlock(&power_domains->lock);

	if (!resume)
		return;

	skl_init_cdclk(dev_priv);

	if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
		gen9_set_dc_state_debugmask(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);
	skl_pw1_misc_io_fini(dev_priv);
	mutex_unlock(&power_domains->lock);
}

static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}

static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might already be active, skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: true if called while resuming from system suspend
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/*
	 * If the user asked to disable power well support, take a permanent
	 * POWER_DOMAIN_INIT reference so that the wells stay enabled.
	 */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}
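
/*
 * Editor's note (not from the original file): a sketch of the call order
 * over the driver's life, as implied by the kernel-doc in this file; the
 * load/unload and suspend/resume handlers that make these calls live
 * elsewhere in the driver.
 *
 *	intel_power_domains_init(dev_priv);            driver load
 *	intel_power_domains_init_hw(dev_priv, false);
 *	intel_runtime_pm_enable(dev_priv);             end of driver load
 *	...
 *	intel_power_domains_suspend(dev_priv);         system suspend
 *	intel_power_domains_init_hw(dev_priv, true);   system resume
 *	...
 *	intel_power_domains_fini(dev_priv);            driver unload
 */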

/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	pm_runtime_get_sync(device);

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
}
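
/*
 * Illustrative usage sketch (editor's addition, not from the original file):
 * the device-level bracket described above; the GT access in the middle is
 * hypothetical.
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... touch the GTT or GT while the device is guaranteed awake ...
 *	intel_runtime_pm_put(dev_priv);
 */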

/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: true if the reference was acquired, false otherwise.
 */
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (IS_ENABLED(CONFIG_PM)) {
		int ret = pm_runtime_get_if_in_use(device);

		/*
		 * In cases where runtime PM is disabled by the RPM core and
		 * we get an -EINVAL return value we are not supposed to call
		 * this function, since the power state is undefined. This
		 * currently applies to the late/early system suspend/resume
		 * handlers.
		 */
		WARN_ON_ONCE(ret < 0);
		if (ret <= 0)
			return false;
	}

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);

	return true;
}
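
/*
 * Illustrative usage sketch (editor's addition, not from the original file):
 * the opportunistic pattern this helper supports, e.g. a debugfs read that
 * must not wake a runtime-suspended device. The caller is hypothetical.
 *
 *	if (!intel_runtime_pm_get_if_in_use(dev_priv))
 *		return 0;   device is asleep, nothing to report
 *	... read the hardware state that is needed ...
 *	intel_runtime_pm_put(dev_priv);
 */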

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	pm_runtime_get_noresume(device);

	atomic_inc(&dev_priv->pm.wakeref_count);
}

/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
		atomic_inc(&dev_priv->pm.atomic_seq);

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!HAS_RUNTIME_PM(dev)) {
		pm_runtime_dont_use_autosuspend(device);
		pm_runtime_get_sync(device);
	} else {
		pm_runtime_use_autosuspend(device);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(device);
}