/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include "i915_drv.h"
#include "intel_drv.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
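
/*
 * Illustrative usage sketch (not part of the original file): callers bracket
 * hardware access with a reference on the innermost power domain they need;
 * the domain below is just an example.
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... program pipe A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 */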

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		for_each_if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		for_each_if ((power_well)->domains & (domain_mask))
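
/*
 * Usage sketch for the iterators above (hypothetical caller, mirroring
 * __intel_display_power_get_domain() further below): visit every well
 * backing a single domain, in wakeup order:
 *
 *	for_each_power_well(i, power_well, BIT(domain), power_domains)
 *		intel_power_well_enable(dev_priv, power_well);
 */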
5354 serge 64
 
6084 serge 65
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
66
				    int power_well_id);
67
 
6937 serge 68
const char *
69
intel_display_power_domain_str(enum intel_display_power_domain domain)
70
{
71
	switch (domain) {
72
	case POWER_DOMAIN_PIPE_A:
73
		return "PIPE_A";
74
	case POWER_DOMAIN_PIPE_B:
75
		return "PIPE_B";
76
	case POWER_DOMAIN_PIPE_C:
77
		return "PIPE_C";
78
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
79
		return "PIPE_A_PANEL_FITTER";
80
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
81
		return "PIPE_B_PANEL_FITTER";
82
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
83
		return "PIPE_C_PANEL_FITTER";
84
	case POWER_DOMAIN_TRANSCODER_A:
85
		return "TRANSCODER_A";
86
	case POWER_DOMAIN_TRANSCODER_B:
87
		return "TRANSCODER_B";
88
	case POWER_DOMAIN_TRANSCODER_C:
89
		return "TRANSCODER_C";
90
	case POWER_DOMAIN_TRANSCODER_EDP:
91
		return "TRANSCODER_EDP";
92
	case POWER_DOMAIN_PORT_DDI_A_LANES:
93
		return "PORT_DDI_A_LANES";
94
	case POWER_DOMAIN_PORT_DDI_B_LANES:
95
		return "PORT_DDI_B_LANES";
96
	case POWER_DOMAIN_PORT_DDI_C_LANES:
97
		return "PORT_DDI_C_LANES";
98
	case POWER_DOMAIN_PORT_DDI_D_LANES:
99
		return "PORT_DDI_D_LANES";
100
	case POWER_DOMAIN_PORT_DDI_E_LANES:
101
		return "PORT_DDI_E_LANES";
102
	case POWER_DOMAIN_PORT_DSI:
103
		return "PORT_DSI";
104
	case POWER_DOMAIN_PORT_CRT:
105
		return "PORT_CRT";
106
	case POWER_DOMAIN_PORT_OTHER:
107
		return "PORT_OTHER";
108
	case POWER_DOMAIN_VGA:
109
		return "VGA";
110
	case POWER_DOMAIN_AUDIO:
111
		return "AUDIO";
112
	case POWER_DOMAIN_PLLS:
113
		return "PLLS";
114
	case POWER_DOMAIN_AUX_A:
115
		return "AUX_A";
116
	case POWER_DOMAIN_AUX_B:
117
		return "AUX_B";
118
	case POWER_DOMAIN_AUX_C:
119
		return "AUX_C";
120
	case POWER_DOMAIN_AUX_D:
121
		return "AUX_D";
122
	case POWER_DOMAIN_GMBUS:
123
		return "GMBUS";
124
	case POWER_DOMAIN_INIT:
125
		return "INIT";
126
	case POWER_DOMAIN_MODESET:
127
		return "MODESET";
128
	default:
129
		MISSING_CASE(domain);
130
		return "?";
131
	}
132
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("enabling %s\n", power_well->name);
	power_well->ops->enable(dev_priv, power_well);
	power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
				     struct i915_power_well *power_well)
{
	DRM_DEBUG_KMS("disabling %s\n", power_well->name);
	power_well->hw_enabled = false;
	power_well->ops->disable(dev_priv, power_well);
}

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = __intel_display_power_is_enabled(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/**
 * intel_display_set_init_power - set the initial power domain state
 * @dev_priv: i915 device instance
 * @enable: whether to enable or disable the initial power domain state
 *
 * For simplicity our driver load/unload and system suspend/resume code assumes
 * that all power domains are always enabled. This function controls the state
 * of this little hack. While the initial power domain state is enabled runtime
 * pm is effectively disabled.
 */
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
				  bool enable)
{
	if (dev_priv->power_domains.init_power_on == enable)
		return;

	if (enable)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	else
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	dev_priv->power_domains.init_power_on = enable;
}
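
/*
 * Call-pattern sketch (hypothetical call sites, not in this file): driver
 * load/resume turns the init power on before touching the hardware, and
 * drops it again once runtime pm may take over:
 *
 *	intel_display_set_init_power(dev_priv, true);
 *	... hw init / state readout ...
 *	intel_display_set_init_power(dev_priv, false);
 */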

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
}

static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	if (power_well->data == SKL_DISP_PW_2) {
		vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
		outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
		vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

		gen8_irq_power_well_post_enable(dev_priv,
						1 << PIPE_C | 1 << PIPE_B);
	}
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
			hsw_power_well_post_enable(dev_priv);
		}

	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_B_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_C_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(				\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) |		\
	BIT(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT(POWER_DOMAIN_PIPE_B) |			\
	BIT(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT(POWER_DOMAIN_PIPE_C) |			\
	BIT(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUDIO) |			\
	BIT(POWER_DOMAIN_VGA) |				\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT(POWER_DOMAIN_MODESET) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS (		\
	(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS |	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) |	\
	BIT(POWER_DOMAIN_INIT))

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
	WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be enabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled to enable DC9.\n");
	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
	WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
		"DC9 already programmed to be disabled.\n");
	WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
		"DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}

static void gen9_set_dc_state_debugmask_memory_up(
			struct drm_i915_private *dev_priv)
{
	uint32_t val;

	/* The below bit doesn't need to be cleared ever afterwards */
	val = I915_READ(DC_STATE_DEBUG);
	if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
		val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
		I915_WRITE(DC_STATE_DEBUG, val);
		POSTING_READ(DC_STATE_DEBUG);
	}
}

static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
				u32 state)
{
	int rewrites = 0;
	int rereads = 0;
	u32 v;

	I915_WRITE(DC_STATE_EN, state);

	/* It has been observed that disabling the dc6 state sometimes
	 * doesn't stick and the dmc keeps returning the old value. Make sure
	 * the write really sticks enough times, and also force a rewrite
	 * until we are confident that the state is exactly what we want.
	 */
	do {
		v = I915_READ(DC_STATE_EN);

		if (v != state) {
			I915_WRITE(DC_STATE_EN, state);
			rewrites++;
			rereads = 0;
		} else if (rereads++ > 5) {
			break;
		}

	} while (rewrites < 100);

	if (v != state)
		DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
			  state, v);

	/* Most of the time a single retry is enough, so avoid log spam */
	if (rewrites > 1)
		DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
			      state, rewrites);
}

static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
	uint32_t val;
	uint32_t mask;

	mask = DC_STATE_EN_UPTO_DC5;
	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_EN_DC9;
	else
		mask |= DC_STATE_EN_UPTO_DC6;

	WARN_ON_ONCE(state & ~mask);

	if (i915.enable_dc == 0)
		state = DC_STATE_DISABLE;
	else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
		state = DC_STATE_EN_UPTO_DC5;

	if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK)
		gen9_set_dc_state_debugmask_memory_up(dev_priv);

	val = I915_READ(DC_STATE_EN);
	DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
		      val & mask, state);

	/* Check if DMC is ignoring our DC state requests */
	if ((val & mask) != dev_priv->csr.dc_state)
		DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
			  dev_priv->csr.dc_state, val & mask);

	val &= ~mask;
	val |= state;

	gen9_write_dc_state(dev_priv, val);

	dev_priv->csr.dc_state = val & mask;
}

void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	DRM_DEBUG_KMS("Enabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	DRM_DEBUG_KMS("Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
		  "CSR program storage start is NULL\n");
	WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
	WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
					SKL_DISP_PW_2);

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");

	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
		  "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(dev_priv);

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	assert_rpm_wakelock_held(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	DRM_DEBUG_KMS("Enabling DC5\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
	WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
	WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		  "Backlight is not disabled.\n");
	WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
{
	/*
	 * During initialization, the firmware may not be loaded yet.
	 * We still want to make sure that the DC enabling flag is cleared.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
		  "DC6 already programmed to be disabled.\n");
}

static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc5(dev_priv);

	if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
		assert_can_disable_dc6(dev_priv);

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	DRM_DEBUG_KMS("Enabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc6(dev_priv);

	DRM_DEBUG_KMS("Disabling DC6\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}

static void skl_set_power_well(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t tmp, fuse_status;
	uint32_t req_mask, state_mask;
	bool is_enabled, enable_requested, check_fuse_status = false;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	fuse_status = I915_READ(SKL_FUSE_STATUS);

	switch (power_well->data) {
	case SKL_DISP_PW_1:
		if (wait_for((I915_READ(SKL_FUSE_STATUS) &
			SKL_FUSE_PG0_DIST_STATUS), 1)) {
			DRM_ERROR("PG0 not enabled\n");
			return;
		}
		break;
	case SKL_DISP_PW_2:
		if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
			DRM_ERROR("PG1 in disabled state\n");
			return;
		}
		break;
	case SKL_DISP_PW_DDI_A_E:
	case SKL_DISP_PW_DDI_B:
	case SKL_DISP_PW_DDI_C:
	case SKL_DISP_PW_DDI_D:
	case SKL_DISP_PW_MISC_IO:
		break;
	default:
		WARN(1, "Unknown power well %lu\n", power_well->data);
		return;
	}

	req_mask = SKL_POWER_WELL_REQ(power_well->data);
	enable_requested = tmp & req_mask;
	state_mask = SKL_POWER_WELL_STATE(power_well->data);
	is_enabled = tmp & state_mask;

	if (enable) {
		if (!enable_requested) {
			WARN((tmp & state_mask) &&
				!I915_READ(HSW_PWR_WELL_BIOS),
				"Invalid for power well status to be enabled, unless done by the BIOS, \
				when request is to disable!\n");
			if (power_well->data == SKL_DISP_PW_2) {
				/*
				 * DDI buffer programming unnecessary during
				 * driver-load/resume as it's already done
				 * during modeset initialization then. It's
				 * also invalid here as encoder list is still
				 * uninitialized.
				 */
				if (!dev_priv->power_domains.initializing)
					intel_prepare_ddi(dev);
			}
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
		}

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				state_mask), 1))
				DRM_ERROR("%s enable timeout\n",
					power_well->name);
			check_fuse_status = true;
		}
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
		}
	}

	if (check_fuse_status) {
		if (power_well->data == SKL_DISP_PW_1) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG1_DIST_STATUS), 1))
				DRM_ERROR("PG1 distributing status timeout\n");
		} else if (power_well->data == SKL_DISP_PW_2) {
			if (wait_for((I915_READ(SKL_FUSE_STATUS) &
				SKL_FUSE_PG2_DIST_STATUS), 1))
				DRM_ERROR("PG2 distributing status timeout\n");
		}
	}

	if (enable && !is_enabled)
		skl_power_well_post_enable(dev_priv, power_well);
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	uint32_t mask = SKL_POWER_WELL_REQ(power_well->data) |
		SKL_POWER_WELL_STATE(power_well->data);

	return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
}

static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, power_well->count > 0);

	/* Clear any request made by BIOS as driver is taking over */
	I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void skl_power_well_enable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, true);
}

static void skl_power_well_disable(struct drm_i915_private *dev_priv,
				struct i915_power_well *power_well)
{
	skl_set_power_well(dev_priv, power_well, false);
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc5_dc6(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
		skl_enable_dc6(dev_priv);
	else
		gen9_enable_dc5(dev_priv);
}

static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (power_well->count > 0) {
		gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	} else {
		if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
		    i915.enable_dc != 1)
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
		else
			gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
	}
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv->dev, pipe) {
		u32 val = I915_READ(DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		I915_WRITE(DPLL(pipe), val);
	}

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_power_sequencer_reset(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.	De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a.	GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b.	The other bits such as sfr settings / modesel may all
	 *	be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
						 int power_well_id)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;

		power_well = &power_domains->power_wells[i];
		if (power_well->data == power_well_id)
			return power_well;
	}

	return NULL;
}

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;
	u32 tmp;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
		WARN(phy_status != tmp,
		     "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
		     tmp, phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

1184
 
5354 serge 1185
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1186
					   struct i915_power_well *power_well)
1187
{
1188
	enum dpio_phy phy;
6084 serge 1189
	enum pipe pipe;
1190
	uint32_t tmp;
5354 serge 1191
 
1192
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
1193
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
1194
 
1195
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6084 serge 1196
		pipe = PIPE_A;
5354 serge 1197
		phy = DPIO_PHY0;
1198
	} else {
6084 serge 1199
		pipe = PIPE_C;
5354 serge 1200
		phy = DPIO_PHY1;
1201
	}
6084 serge 1202
 
1203
	/* since ref/cri clock was enabled */
5354 serge 1204
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1205
	vlv_set_power_well(dev_priv, power_well, true);
1206
 
1207
	/* Poll for phypwrgood signal */
1208
	if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
1209
		DRM_ERROR("Display PHY %d is not power up\n", phy);
1210
 
6084 serge 1211
	mutex_lock(&dev_priv->sb_lock);
1212
 
1213
	/* Enable dynamic power down */
1214
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1215
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1216
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1217
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1218
 
1219
	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
1220
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1221
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
1222
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1223
	} else {
1224
		/*
1225
		 * Force the non-existing CL2 off. BXT does this
1226
		 * too, so maybe it saves some power even though
1227
		 * CL2 doesn't exist?
1228
		 */
1229
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1230
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1231
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1232
	}
1233
 
1234
	mutex_unlock(&dev_priv->sb_lock);
1235
 
1236
	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1237
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1238
 
1239
	DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1240
		      phy, dev_priv->chv_phy_control);
1241
 
1242
	assert_chv_phy_status(dev_priv);
5354 serge 1243
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
		     power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);

	if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		      phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	WARN(actual != expected,
	     "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
	     !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
	     !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
	     reg, val);
}

bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		      phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		      phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = power_well->data;
	bool enabled;
	u32 state, ctrl;

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
	WARN_ON(ctrl << 16 != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = power_well->data;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PIPE_A);

	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++)
			intel_power_well_enable(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&power_domains->lock);

	__intel_display_power_get_domain(dev_priv, domain);

	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain only if the domain
 * is already enabled, and in that case it also ensures the domain stays
 * powered up until the reference is released. Users should only grab a
 * reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns:
 * True if the reference was obtained, false if the domain was not enabled.
 */
bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool is_enabled;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled)
		intel_runtime_pm_put(dev_priv);

	return is_enabled;
}
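
/*
 * Usage sketch (hypothetical caller): hardware state readout that must not
 * power anything up behind the hardware's back.
 *
 *	if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A)) {
 *		... read out pipe A hw state ...
 *		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *	}
 */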

/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN(!power_domains->domain_use_count[domain],
	     "Use count on domain %s is already zero\n",
	     intel_display_power_domain_str(domain));
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN(!power_well->count,
		     "Use count on power well %s is already zero",
		     power_well->name);

		if (!--power_well->count)
			intel_power_well_disable(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
1608
 
#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_PLLS) |			\
	BIT(POWER_DOMAIN_AUX_A) |			\
	BIT(POWER_DOMAIN_AUX_B) |			\
	BIT(POWER_DOMAIN_AUX_C) |			\
	BIT(POWER_DOMAIN_AUX_D) |			\
	BIT(POWER_DOMAIN_GMBUS) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT(POWER_DOMAIN_AUX_B) |		\
	BIT(POWER_DOMAIN_AUX_C) |		\
	BIT(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT(POWER_DOMAIN_AUX_D) |		\
	BIT(POWER_DOMAIN_INIT))

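/*
 * The *_POWER_DOMAINS masks above are plain bitmasks over
 * enum intel_display_power_domain, so membership tests reduce to bit
 * arithmetic. A hypothetical check, for illustration only:
 *
 *	if (VLV_DPIO_CMN_BC_POWER_DOMAINS & BIT(POWER_DOMAIN_AUX_B))
 *		... the common BC well must be up for AUX channel B ...
 */
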
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

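/*
 * The i915_power_well_ops tables act as per-platform vtables: the
 * generic code never knows which kind of well it is driving and simply
 * dispatches through the ops pointer. A minimal sketch of such a
 * dispatch (the real intel_power_well_enable()/_disable() helpers live
 * elsewhere in this file and may do more):
 *
 *	if (!power_well->ops->is_enabled(dev_priv, power_well))
 *		power_well->ops->enable(dev_priv, power_well);
 */
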
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static const struct i915_power_well_ops skl_power_well_ops = {
	.sync_hw = skl_power_well_sync_hw,
	.enable = skl_power_well_enable,
	.disable = skl_power_well_disable,
	.is_enabled = skl_power_well_enabled,
};

static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = gen9_dc_off_power_well_sync_hw,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
		.data = PUNIT_POWER_WELL_ALWAYS_ON,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_cmn_power_well_ops,
	},
};
 
static struct i915_power_well chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PIPE_A,
		.ops = &chv_pipe_power_well_ops,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_D,
		.ops = &chv_dpio_cmn_power_well_ops,
	},
};

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
				    int power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->ops->is_enabled(dev_priv, power_well);

	return ret;
}
 
static struct i915_power_well skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
		.data = SKL_DISP_PW_ALWAYS_ON,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.domains = 0,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_MISC_IO,
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
	{
		.name = "DDI A/E power well",
		.domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_A_E,
	},
	{
		.name = "DDI B power well",
		.domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_B,
	},
	{
		.name = "DDI C power well",
		.domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_C,
	},
	{
		.name = "DDI D power well",
		.domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_DDI_D,
	},
};

void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *well;

	if (!IS_SKYLAKE(dev_priv))
		return;

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);
}

void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *well;

	if (!IS_SKYLAKE(dev_priv))
		return;

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_disable(dev_priv, well);
}
 
static struct i915_power_well bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "power well 1",
		.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_1,
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.data = SKL_DISP_PW_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &skl_power_well_ops,
		.data = SKL_DISP_PW_2,
	},
};

static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	if (IS_BROXTON(dev_priv)) {
		DRM_DEBUG_KMS("Disabling display power well support\n");
		return 0;
	}

	return 1;
}
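
/*
 * For illustration, the effective i915.disable_power_well semantics
 * implemented above:
 *
 *	-1 -> platform default: 0 on Broxton, 1 everywhere else
 *	 0 -> power wells are kept on at all times (a POWER_DOMAIN_INIT
 *	      reference is held, see intel_power_domains_init_hw())
 *	 1 -> unused power wells may be powered down at runtime
 */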
 
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
						     i915.disable_power_well);

	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
		set_power_wells(power_domains, skl_power_wells);
	} else if (IS_BROXTON(dev_priv->dev)) {
		set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, chv_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
 
/**
 * intel_power_domains_fini - finalizes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Finalizes the power domain structures for @dev_priv depending upon the
 * supported platform. This function also disables runtime pm and ensures that
 * the device stays powered up so that the driver can be reloaded.
 */
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
	struct device *device = &dev_priv->dev->pdev->dev;

	/*
	 * The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload.
	 * The following also reacquires the RPM reference the core passed
	 * to the driver during loading, which is dropped in
	 * intel_runtime_pm_enable(). We have to hand back the control of the
	 * device to the core with this reference held.
	 */
	intel_display_set_init_power(dev_priv, true);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	/*
	 * Remove the refcount we took in intel_runtime_pm_enable() in case
	 * the platform doesn't support runtime PM.
	 */
	if (!HAS_RUNTIME_PM(dev_priv))
		pm_runtime_put(device);
}
 
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
		power_well->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
								     power_well);
	}
	mutex_unlock(&power_domains->lock);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	uint32_t val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);
	skl_pw1_misc_io_init(dev_priv);
	mutex_unlock(&power_domains->lock);

	if (!resume)
		return;

	skl_init_cdclk(dev_priv);

	if (dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	skl_uninit_cdclk(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);
	skl_pw1_misc_io_fini(dev_priv);
	mutex_unlock(&power_domains->lock);
}
 
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
		uint32_t status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
		uint32_t status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
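
/*
 * A minimal sketch of the shadow-register pattern set up above: since
 * DISPLAY_PHY_CONTROL must never be read back, every later update is a
 * read-modify-write on the cached copy followed by a blind write,
 * roughly:
 *
 *	dev_priv->chv_phy_control |= <new bits>;
 *	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
 */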
 
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->ops->disable(dev_priv, cmn);
}
 
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @dev_priv: i915 device instance
 * @resume: true when called while resuming from system suspend
 *
 * This function initializes the hardware power domain state and enables all
 * power domains using intel_display_set_init_power().
 */
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
	struct drm_device *dev = dev_priv->dev;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		skl_display_core_init(dev_priv, resume);
	} else if (IS_CHERRYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(dev_priv);
		mutex_unlock(&power_domains->lock);
	} else if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(dev_priv);
		mutex_unlock(&power_domains->lock);
	}

	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	/* Keep the power wells always on if the user disabled power well support. */
	if (!i915.disable_power_well)
		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(dev_priv);
	power_domains->initializing = false;
}
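
/*
 * Rough driver load/unload ordering implied by the functions in this
 * file (a sketch only; the actual call sites live in the driver core):
 *
 *	intel_power_domains_init(dev_priv);		// build well tables
 *	intel_power_domains_init_hw(dev_priv, false);	// sync to hardware
 *	intel_runtime_pm_enable(dev_priv);		// allow runtime suspend
 *	...
 *	intel_power_domains_fini(dev_priv);		// hand power control back
 */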
 
/**
 * intel_power_domains_suspend - suspend power domain state
 * @dev_priv: i915 device instance
 *
 * This function prepares the hardware power domain state before entering
 * system suspend. It must be paired with intel_power_domains_init_hw().
 */
void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
{
	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells while we are system suspended.
	 */
	if (!i915.disable_power_well)
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		skl_display_core_uninit(dev_priv);
}
 
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	pm_runtime_get_sync(device);

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);
}
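
/*
 * Illustrative sketch: runtime pm references follow the same get/put
 * discipline as the display power domains, e.g. around a GT register
 * access:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... the device is guaranteed to be powered here ...
 *	intel_runtime_pm_put(dev_priv);
 */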
 
/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (IS_ENABLED(CONFIG_PM)) {
		int ret = pm_runtime_get_if_in_use(device);

		/*
		 * If runtime PM is disabled by the RPM core we get an -EINVAL
		 * return value, in which case we are not supposed to call
		 * this function at all, since the power state is undefined.
		 * Currently this applies to the late/early system
		 * suspend/resume handlers.
		 */
		WARN_ON_ONCE(ret < 0);
		if (ret <= 0)
			return false;
	}

	atomic_inc(&dev_priv->pm.wakeref_count);
	assert_rpm_wakelock_held(dev_priv);

	return true;
}
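
/*
 * Illustrative sketch: as with the display variant, the conditional
 * getter's return value gates both the access and the matching put:
 *
 *	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
 *		... device was already awake, safe to poke it ...
 *		intel_runtime_pm_put(dev_priv);
 *	}
 */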
 
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 */
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	pm_runtime_get_noresume(device);

	atomic_inc(&dev_priv->pm.wakeref_count);
}
 
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @dev_priv: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	assert_rpm_wakelock_held(dev_priv);
	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
		atomic_inc(&dev_priv->pm.atomic_seq);

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}
 
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @dev_priv: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is only done on the first modeset
 * using intel_display_set_init_power().
 */
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!HAS_RUNTIME_PM(dev)) {
		pm_runtime_dont_use_autosuspend(device);
		pm_runtime_get_sync(device);
	} else {
		pm_runtime_use_autosuspend(device);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(device);
}