Subversion Repositories Kolibri OS

Rev

Rev 6937 | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2008 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *    Keith Packard 
25
 *
26
 */
27
 
28
#include 
2330 Serge 29
#include 
3031 serge 30
#include 
6937 serge 31
#include 
3031 serge 32
#include 
6084 serge 33
#include 
3031 serge 34
#include 
35
#include 
36
#include 
2327 Serge 37
#include "intel_drv.h"
3031 serge 38
#include 
2327 Serge 39
#include "i915_drv.h"
40
 
5060 serge 41
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
2327 Serge 42
 
6084 serge 43
/* Compliance test status bits  */
44
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
45
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
46
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48
 
4560 Serge 49
/* Maps a DP link rate (in kHz) to the PLL divider values that produce it. */
struct dp_link_dpll {
	int clock;		/* link clock in kHz, e.g. 162000 / 270000 / 540000 */
	struct dpll dpll;	/* divider settings for that clock */
};

/* Gen4 (i965-class) DPLL settings per link rate. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH-split (ILK+) DPLL settings per link rate. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* ValleyView DPLL settings per link rate. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows more link rates.
 * Only the fixed rates are provided below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Link rates (kHz) supported by each platform family; default is DP 1.2. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
98
 
2327 Serge 99
/**
100
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
101
 * @intel_dp: DP struct
102
 *
103
 * If a CPU or PCH DP output is attached to an eDP panel, this function
104
 * will return true, and false otherwise.
105
 */
106
static bool is_edp(struct intel_dp *intel_dp)
107
{
3243 Serge 108
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109
 
110
	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
2327 Serge 111
}
112
 
3243 Serge 113
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
2327 Serge 114
{
3243 Serge 115
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
116
 
117
	return intel_dig_port->base.base.dev;
2327 Serge 118
}
119
 
2330 Serge 120
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
121
{
3243 Serge 122
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2330 Serge 123
}
2327 Serge 124
 
2330 Serge 125
static void intel_dp_link_down(struct intel_dp *intel_dp);
5354 serge 126
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
5060 serge 127
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
5354 serge 128
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
129
static void vlv_steal_power_sequencer(struct drm_device *dev,
130
				      enum pipe pipe);
2330 Serge 131
 
6084 serge 132
/*
 * Return a 4-bit mask of the DP lanes that remain unused when driving
 * @lane_count lanes (e.g. lane_count == 1 -> 0xe, 4 -> 0x0).
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used = (1u << lane_count) - 1;

	return ~used & 0xf;
}
136
 
137
/*
 * Return the sink's maximum link bandwidth code from DPCD
 * (DP_MAX_LINK_RATE), sanitized to one of the known values.
 * Unknown codes fall back to 1.62 Gbps with a one-time warning.
 */
static int
intel_dp_max_link_bw(struct intel_dp  *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
2327 Serge 155
 
5060 serge 156
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
157
{
158
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
159
	u8 source_max, sink_max;
160
 
7144 serge 161
	source_max = intel_dig_port->max_lanes;
5060 serge 162
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
163
 
164
	return min(source_max, sink_max);
165
}
166
 
2342 Serge 167
/*
168
 * The units on the numbers in the next two are... bizarre.  Examples will
169
 * make it clearer; this one parallels an example in the eDP spec.
170
 *
171
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
172
 *
173
 *     270000 * 1 * 8 / 10 == 216000
174
 *
175
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
176
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
177
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
178
 * 119000.  At 18bpp that's 2142000 kilobits per second.
179
 *
180
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
181
 * get the result in decakilobits instead of kilobits.
182
 */
183
 
2330 Serge 184
/*
 * intel_dp_link_required - bandwidth a mode needs, in decakilobits/s.
 * @pixel_clock: dotclock in kHz
 * @bpp: bits per pixel
 *
 * Divides by 10 (rounding up) so the result is directly comparable
 * with intel_dp_max_data_rate(), which also returns decakilobits.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	/* round up while converting kilobits -> decakilobits */
	return (kilobits + 9) / 10;
}
2327 Serge 189
 
2330 Serge 190
/*
 * intel_dp_max_data_rate - max payload bandwidth, in decakilobits/s.
 * @max_link_clock: link symbol clock in kHz
 * @max_lanes: number of lanes
 *
 * 8b/10b channel coding carries 8 payload bits per 10 link bits.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_rate = max_link_clock * max_lanes;

	return link_rate * 8 / 10;
}
2327 Serge 195
 
4560 Serge 196
/*
 * Validate a display mode against this DP/eDP output's limits.
 *
 * For eDP with a fixed panel mode, the requested mode must fit inside
 * the panel's native resolution and the panel's clock is used for the
 * bandwidth check. The mode is rejected when the required link rate
 * (at 18 bpp, the minimum DP color depth used here — presumably as a
 * lowest-common-denominator check; confirm against upstream rationale)
 * exceeds what the link can carry, or the dotclock is out of range.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

	if (is_edp(intel_dp) && fixed_mode) {
		/* eDP panels can't scale beyond their native timings */
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* bandwidth is determined by the panel's fixed clock */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	/* dotclocks below 10 MHz are not supported */
	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
234
 
5354 serge 235
/*
 * intel_dp_pack_aux - pack up to 4 bytes into one big-endian AUX data word.
 * @src: source bytes
 * @src_bytes: number of valid bytes (values above 4 are clamped)
 *
 * Byte 0 lands in the most significant byte of the returned word,
 * matching the AUX channel data register layout.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int n = src_bytes > 4 ? 4 : src_bytes;
	int i;

	for (i = 0; i < n; i++)
		v |= (uint32_t)src[i] << (8 * (3 - i));

	return v;
}
246
 
6084 serge 247
/*
 * intel_dp_unpack_aux - unpack a big-endian AUX data word into bytes.
 * @src: 32-bit word read from an AUX data register
 * @dst: destination byte buffer
 * @dst_bytes: bytes to write (values above 4 are clamped)
 *
 * Inverse of intel_dp_pack_aux(): the most significant byte of @src
 * becomes dst[0].
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int n = dst_bytes > 4 ? 4 : dst_bytes;
	int i;

	for (i = 0; i < n; i++)
		dst[i] = (uint8_t)(src >> (8 * (3 - i)));
}
255
 
4560 Serge 256
static void
257
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5354 serge 258
				    struct intel_dp *intel_dp);
4560 Serge 259
static void
260
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5354 serge 261
					      struct intel_dp *intel_dp);
4560 Serge 262
 
5354 serge 263
/*
 * Acquire pps_mutex for panel power sequencer access.
 *
 * Grabs an AUX power domain reference BEFORE taking the mutex; the
 * ordering is mandatory — see the comment in vlv_power_sequencer_reset()
 * for the deadlock this avoids. Pair with pps_unlock().
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
280
 
281
/*
 * Release pps_mutex and drop the AUX power domain reference taken by
 * pps_lock(). The mutex is released first, mirroring the acquire order.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
294
 
295
/*
 * Force the freshly assigned power sequencer to lock onto this port by
 * briefly enabling and disabling the port with a minimal 1-lane config.
 * Requires the pipe's DPLL, which is force-enabled (and any needed CHV
 * PHY channel powered up) for the duration if it wasn't already on.
 * Bails out if the port is already enabled.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
368
 
4560 Serge 369
/*
 * Return the pipe whose panel power sequencer drives this eDP port,
 * assigning one on first use: pick a PPS not claimed by any other eDP
 * encoder (stealing it if needed), program it for this port, and kick
 * it so it locks onto the port. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
431
 
432
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
433
			       enum pipe pipe);
434
 
435
/* PPS predicate: is panel power (PP_ON) currently asserted on @pipe? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}
440
 
441
/* PPS predicate: is VDD force currently asserted on @pipe's sequencer? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
446
 
447
/* PPS predicate that accepts any pipe — the final fallback in
 * vlv_initial_power_sequencer_setup(). */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
452
 
453
/*
 * Scan pipes A/B for a power sequencer whose port-select field matches
 * @port AND that satisfies @pipe_check; return it, or INVALID_PIPE if
 * no sequencer qualifies.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
475
 
5354 serge 476
/*
 * At init, adopt whichever power sequencer the BIOS/previous owner left
 * assigned to this port, preferring (in order) a sequencer with panel
 * power on, then one with VDD on, then any with the matching port
 * select. If none matches, leave pps_pipe INVALID_PIPE and defer to
 * vlv_power_sequencer_pipe(). Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
512
 
513
/*
 * Invalidate the power-sequencer assignment of every eDP encoder,
 * forcing reassignment on next use. VLV/CHV only — the PPS registers
 * lose their state across a power well cycle, so cached pipe choices
 * become stale.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
541
 
6937 serge 542
/*
 * Select the panel power control register for this platform:
 * BXT and PCH-split platforms have a single fixed register; VLV/CHV
 * use the per-pipe register of the sequencer assigned to this port
 * (assigning one on demand).
 */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
554
 
6937 serge 555
/*
 * Select the panel power status register for this platform; same
 * selection logic as _pp_ctrl_reg().
 */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
567
 
5354 serge 568
#if 0
569
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
570
   This function only applicable when panel PM state is not to be tracked */
571
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
572
			      void *unused)
573
{
574
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
575
						 edp_notifier);
576
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
577
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 578
 
5354 serge 579
	if (!is_edp(intel_dp) || code != SYS_RESTART)
580
		return 0;
581
 
582
	pps_lock(intel_dp);
583
 
6937 serge 584
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5354 serge 585
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
6937 serge 586
		i915_reg_t pp_ctrl_reg, pp_div_reg;
6084 serge 587
		u32 pp_div;
5354 serge 588
 
589
		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
590
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
591
		pp_div = I915_READ(pp_div_reg);
592
		pp_div &= PP_REFERENCE_DIVIDER_MASK;
593
 
594
		/* 0x1F write to PP_DIV_REG sets max cycle delay */
595
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
596
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
597
		msleep(intel_dp->panel_power_cycle_delay);
598
	}
599
 
600
	pps_unlock(intel_dp);
601
 
602
	return 0;
603
}
604
#endif
605
 
5060 serge 606
/*
 * Is panel power (PP_ON) currently asserted? On VLV/CHV, a port with
 * no power sequencer assigned reports false without touching registers
 * (there is no valid register to read). Caller must hold pps_mutex.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
619
 
5060 serge 620
/*
 * Is the VDD force bit currently asserted? Same VLV/CHV no-sequencer
 * short-circuit as edp_have_panel_power(). Caller must hold pps_mutex.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
633
 
634
/*
 * Sanity check before AUX traffic on eDP: warn (once per call site hit)
 * if neither panel power nor forced VDD is up, since AUX transactions
 * will fail on a powered-down panel. No-op for regular DP.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
650
 
3480 Serge 651
/*
 * Wait (up to 10 ms) for the AUX channel SEND_BUSY bit to clear, either
 * via the AUX-done interrupt or by polling, and return the final
 * control register value. The C macro re-reads the register on every
 * evaluation so @status always holds the latest value.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
674
 
5060 serge 675
/*
 * AUX clock divider for gen4-class hardware. Only one divider value is
 * supported, so any @index > 0 returns 0 to terminate the caller's
 * retry loop.
 */
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2 and use that.
	 */
	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
}
686
 
687
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
688
{
689
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
690
	struct drm_device *dev = intel_dig_port->base.base.dev;
6084 serge 691
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 692
 
693
	if (index)
694
		return 0;
695
 
696
	if (intel_dig_port->port == PORT_A) {
6937 serge 697
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
6084 serge 698
 
5060 serge 699
	} else {
6937 serge 700
		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
5060 serge 701
	}
702
}
703
 
704
/*
 * AUX clock divider for HSW/BDW. Port A uses cdclk (one divider).
 * Non-ULT HSW (LPT-H PCH) needs the 63/72 two-divider workaround;
 * everything else uses the PCH raw clock.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else  {
		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
	}
}
725
 
5060 serge 726
/*
 * AUX clock divider for VLV/CHV: a single fixed divider of 100;
 * index > 0 returns 0 to terminate the caller's retry loop.
 */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
730
 
5354 serge 731
/*
 * SKL doesn't need us to program the AUX clock divider (hardware
 * derives the clock from CDCLK automatically). Return a dummy non-zero
 * value for index 0 so the caller's divider loop runs exactly once;
 * the value itself is ignored by skl_get_aux_send_ctl().
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 1;
}
740
 
5060 serge 741
/*
 * Compose the AUX_CH_CTL value that starts a transaction on pre-SKL
 * hardware: busy/done/error bits, per-platform precharge time and
 * timeout, message size, and the 2x bit-clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	/* gen6 uses a shorter precharge than later gens */
	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW eDP (port A) needs the longer 600us timeout */
	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
770
 
5354 serge 771
/*
 * Compose the AUX_CH_CTL value for SKL+: no clock divider field
 * (@unused is the dummy divider from skl_get_aux_clock_divider());
 * uses the fixed 1600us timeout and a 32-pulse sync.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
785
 
2330 Serge 786
/*
 * intel_dp_aux_ch - perform one raw AUX channel transaction.
 * @send, @send_bytes: request bytes to transmit (max 20 = 5 data regs)
 * @recv, @recv_size: buffer for the reply payload
 *
 * Retries across all available clock dividers, up to 5 attempts each
 * (DP spec requires at least 3). Holds pps_lock and forces panel VDD
 * on for the duration (dropped on exit unless an upper layer already
 * held it).
 *
 * Returns the number of reply bytes on success, or a negative errno:
 * -EBUSY (channel never idle / never done / bogus reply size),
 * -E2BIG (request too large), -EIO (receive error),
 * -ETIMEDOUT (sink did not answer).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* warn only when the stuck status changes, to avoid log spam */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
955
 
5060 serge 956
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux .transfer hook: pack a drm_dp_aux_msg into the raw 20-byte
 * AUX wire format, hand it to intel_dp_aux_ch() and decode the reply.
 *
 * Returns the number of payload bytes transferred on success, or a
 * negative errno (-E2BIG on oversized messages, -EINVAL on unknown
 * request types, or whatever intel_dp_aux_ch() returned).
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* 4-byte AUX header: request, 20-bit address, size-1. */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	/* Dispatch on the request type, ignoring the I2C MOT flag. */
	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size writes send a bare (3-byte) address-only header. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
		else
			WARN_ON(msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* Reply code sits in the high nibble of byte 0. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		/* Zero-size reads also use the bare address-only header. */
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1030
 
6937 serge 1031
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1032
				       enum port port)
2330 Serge 1033
{
6937 serge 1034
	switch (port) {
1035
	case PORT_B:
1036
	case PORT_C:
1037
	case PORT_D:
1038
		return DP_AUX_CH_CTL(port);
1039
	default:
1040
		MISSING_CASE(port);
1041
		return DP_AUX_CH_CTL(PORT_B);
1042
	}
1043
}
2330 Serge 1044
 
6937 serge 1045
static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1046
					enum port port, int index)
1047
{
1048
	switch (port) {
1049
	case PORT_B:
1050
	case PORT_C:
1051
	case PORT_D:
1052
		return DP_AUX_CH_DATA(port, index);
1053
	default:
1054
		MISSING_CASE(port);
1055
		return DP_AUX_CH_DATA(PORT_B, index);
1056
	}
1057
}
1058
 
1059
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1060
				       enum port port)
1061
{
1062
	switch (port) {
1063
	case PORT_A:
1064
		return DP_AUX_CH_CTL(port);
1065
	case PORT_B:
1066
	case PORT_C:
1067
	case PORT_D:
1068
		return PCH_DP_AUX_CH_CTL(port);
1069
	default:
1070
		MISSING_CASE(port);
1071
		return DP_AUX_CH_CTL(PORT_A);
1072
	}
1073
}
1074
 
1075
static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1076
					enum port port, int index)
1077
{
1078
	switch (port) {
1079
	case PORT_A:
1080
		return DP_AUX_CH_DATA(port, index);
1081
	case PORT_B:
1082
	case PORT_C:
1083
	case PORT_D:
1084
		return PCH_DP_AUX_CH_DATA(port, index);
1085
	default:
1086
		MISSING_CASE(port);
1087
		return DP_AUX_CH_DATA(PORT_A, index);
1088
	}
1089
}
1090
 
1091
/*
1092
 * On SKL we don't have Aux for port E so we rely
1093
 * on VBT to set a proper alternate aux channel.
1094
 */
1095
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1096
{
1097
	const struct ddi_vbt_port_info *info =
1098
		&dev_priv->vbt.ddi_port_info[PORT_E];
1099
 
7144 serge 1100
	switch (info->alternate_aux_channel) {
6937 serge 1101
	case DP_AUX_A:
1102
		return PORT_A;
7144 serge 1103
	case DP_AUX_B:
6937 serge 1104
		return PORT_B;
7144 serge 1105
	case DP_AUX_C:
6937 serge 1106
		return PORT_C;
7144 serge 1107
	case DP_AUX_D:
6937 serge 1108
		return PORT_D;
1109
	default:
1110
		MISSING_CASE(info->alternate_aux_channel);
1111
		return PORT_A;
1112
	}
1113
}
1114
 
1115
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1116
				       enum port port)
1117
{
1118
	if (port == PORT_E)
1119
		port = skl_porte_aux_port(dev_priv);
1120
 
1121
	switch (port) {
1122
	case PORT_A:
1123
	case PORT_B:
1124
	case PORT_C:
1125
	case PORT_D:
1126
		return DP_AUX_CH_CTL(port);
7144 serge 1127
	default:
6937 serge 1128
		MISSING_CASE(port);
1129
		return DP_AUX_CH_CTL(PORT_A);
6084 serge 1130
	}
7144 serge 1131
}
6084 serge 1132
 
6937 serge 1133
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1134
					enum port port, int index)
1135
{
1136
	if (port == PORT_E)
1137
		port = skl_porte_aux_port(dev_priv);
1138
 
5060 serge 1139
	switch (port) {
1140
	case PORT_A:
1141
	case PORT_B:
1142
	case PORT_C:
1143
	case PORT_D:
6937 serge 1144
		return DP_AUX_CH_DATA(port, index);
2330 Serge 1145
	default:
6937 serge 1146
		MISSING_CASE(port);
1147
		return DP_AUX_CH_DATA(PORT_A, index);
2330 Serge 1148
	}
6937 serge 1149
}
2330 Serge 1150
 
6937 serge 1151
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1152
					 enum port port)
1153
{
1154
	if (INTEL_INFO(dev_priv)->gen >= 9)
1155
		return skl_aux_ctl_reg(dev_priv, port);
1156
	else if (HAS_PCH_SPLIT(dev_priv))
1157
		return ilk_aux_ctl_reg(dev_priv, port);
1158
	else
1159
		return g4x_aux_ctl_reg(dev_priv, port);
1160
}
2330 Serge 1161
 
6937 serge 1162
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1163
					  enum port port, int index)
1164
{
1165
	if (INTEL_INFO(dev_priv)->gen >= 9)
1166
		return skl_aux_data_reg(dev_priv, port, index);
1167
	else if (HAS_PCH_SPLIT(dev_priv))
1168
		return ilk_aux_data_reg(dev_priv, port, index);
1169
	else
1170
		return g4x_aux_data_reg(dev_priv, port, index);
1171
}
1172
 
1173
/* Cache the AUX control and data register offsets for this port so the
 * hot AUX transfer path doesn't have to re-derive them every time. */
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	/* One data register per 4 bytes of AUX payload. */
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}
1183
 
1184
/* Tear down the AUX channel: unregister from the drm dp helper core and
 * free the name string allocated in intel_dp_aux_init(). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}
1190
 
1191
/*
 * Set up the AUX channel for this DP port: cache register offsets, name
 * the channel after the port ("DPDDC-B" etc.) and register it with the
 * drm dp helper core.
 *
 * Returns 0 on success, -ENOMEM if the name allocation fails, or the
 * error from drm_dp_aux_register() (name is freed on that path).
 */
static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	int ret;

	intel_aux_reg_init(intel_dp);

	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	if (!intel_dp->aux.name)
		return -ENOMEM;

	intel_dp->aux.dev = connector->base.kdev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		kfree(intel_dp->aux.name);
		return ret;
	}

	return 0;
}
2330 Serge 1217
 
5060 serge 1218
/* Connector unregister hook: drop the AUX channel first, then do the
 * generic connector unregistration. */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	intel_dp_aux_fini(intel_dp);
	intel_connector_unregister(intel_connector);
}
1226
 
5060 serge 1227
/*
 * Program the SKL eDP PLL state: always use DPLL0 and pick the
 * DPLL_CTRL1 link-rate field matching the computed link clock.
 * port_clock is in 10 kHz units; the switch keys on port_clock / 2
 * (symbol rate), so e.g. 162000 corresponds to HBR (2.7 GHz link).
 */
static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	results in CDCLK change. Need to handle the change of CDCLK by
	disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	/* NOTE(review): unknown rates leave ctrl1 with just the override bit
	 * set — presumably unreachable given the source rate tables; confirm. */
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
1272
 
6084 serge 1273
/*
 * Select the fixed LCPLL tap for HSW/BDW DP based on the link clock
 * (port_clock / 2 is the symbol rate: RBR / HBR / HBR2).
 */
void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}
1291
 
6084 serge 1292
static int
1293
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1294
{
1295
	if (intel_dp->num_sink_rates) {
1296
		*sink_rates = intel_dp->sink_rates;
1297
		return intel_dp->num_sink_rates;
1298
	}
1299
 
1300
	*sink_rates = default_rates;
1301
 
1302
	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1303
}
1304
 
6937 serge 1305
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
6084 serge 1306
{
6937 serge 1307
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1308
	struct drm_device *dev = dig_port->base.base.dev;
1309
 
6084 serge 1310
	/* WaDisableHBR2:skl */
6937 serge 1311
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
6084 serge 1312
		return false;
1313
 
1314
	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1315
	    (INTEL_INFO(dev)->gen >= 9))
1316
		return true;
1317
	else
1318
		return false;
1319
}
1320
 
1321
static int
6937 serge 1322
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
6084 serge 1323
{
6937 serge 1324
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1325
	struct drm_device *dev = dig_port->base.base.dev;
6084 serge 1326
	int size;
1327
 
1328
	if (IS_BROXTON(dev)) {
1329
		*source_rates = bxt_rates;
1330
		size = ARRAY_SIZE(bxt_rates);
6937 serge 1331
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
6084 serge 1332
		*source_rates = skl_rates;
1333
		size = ARRAY_SIZE(skl_rates);
1334
	} else {
1335
		*source_rates = default_rates;
1336
		size = ARRAY_SIZE(default_rates);
1337
	}
1338
 
1339
	/* This depends on the fact that 5.4 is last value in the array */
6937 serge 1340
	if (!intel_dp_source_supports_hbr2(intel_dp))
6084 serge 1341
		size--;
1342
 
1343
	return size;
1344
}
1345
 
4104 Serge 1346
static void
1347
intel_dp_set_clock(struct intel_encoder *encoder,
6084 serge 1348
		   struct intel_crtc_state *pipe_config)
4104 Serge 1349
{
1350
	struct drm_device *dev = encoder->base.dev;
4560 Serge 1351
	const struct dp_link_dpll *divisor = NULL;
1352
	int i, count = 0;
4104 Serge 1353
 
1354
	if (IS_G4X(dev)) {
4560 Serge 1355
		divisor = gen4_dpll;
1356
		count = ARRAY_SIZE(gen4_dpll);
4104 Serge 1357
	} else if (HAS_PCH_SPLIT(dev)) {
4560 Serge 1358
		divisor = pch_dpll;
1359
		count = ARRAY_SIZE(pch_dpll);
5060 serge 1360
	} else if (IS_CHERRYVIEW(dev)) {
1361
		divisor = chv_dpll;
1362
		count = ARRAY_SIZE(chv_dpll);
4560 Serge 1363
	} else if (IS_VALLEYVIEW(dev)) {
1364
		divisor = vlv_dpll;
1365
		count = ARRAY_SIZE(vlv_dpll);
5060 serge 1366
	}
4560 Serge 1367
 
1368
	if (divisor && count) {
1369
		for (i = 0; i < count; i++) {
6084 serge 1370
			if (pipe_config->port_clock == divisor[i].clock) {
4560 Serge 1371
				pipe_config->dpll = divisor[i].dpll;
5060 serge 1372
				pipe_config->clock_set = true;
4560 Serge 1373
				break;
1374
			}
1375
		}
4104 Serge 1376
	}
1377
}
1378
 
6084 serge 1379
static int intersect_rates(const int *source_rates, int source_len,
1380
			   const int *sink_rates, int sink_len,
1381
			   int *common_rates)
1382
{
1383
	int i = 0, j = 0, k = 0;
1384
 
1385
	while (i < source_len && j < sink_len) {
1386
		if (source_rates[i] == sink_rates[j]) {
1387
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1388
				return k;
1389
			common_rates[k] = source_rates[i];
1390
			++k;
1391
			++i;
1392
			++j;
1393
		} else if (source_rates[i] < sink_rates[j]) {
1394
			++i;
1395
		} else {
1396
			++j;
1397
		}
1398
	}
1399
	return k;
1400
}
1401
 
1402
/* Fill common_rates with the intersection of source and sink link-rate
 * tables; returns the number of common rates (ascending order). */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
1415
 
1416
/*
 * Format 'count' integers into 'buf' as a comma-separated list
 * ("1, 2, 3"). Output is always NUL-terminated; formatting stops
 * silently once the buffer would overflow.
 */
static void snprintf_int_array(char *buf, size_t remaining,
			       const int *values, int count)
{
	int idx;

	/* Start from an empty string so the result is valid for count == 0. */
	buf[0] = '\0';

	for (idx = 0; idx < count; idx++) {
		const char *sep = idx ? ", " : "";
		int written = snprintf(buf, remaining, "%s%d", sep, values[idx]);

		/* Stop once the output no longer fits. */
		if (written >= remaining)
			return;

		buf += written;
		remaining -= written;
	}
}
1431
 
1432
/* Debug helper: log the source, sink and common link-rate tables. No-op
 * unless KMS debugging is enabled. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	/* Skip all the formatting work when the message would be dropped. */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1454
 
1455
static int rate_to_index(int find, const int *rates)
1456
{
1457
	int i = 0;
1458
 
1459
	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1460
		if (find == rates[i])
1461
			break;
1462
 
1463
	return i;
1464
}
1465
 
1466
/*
 * Highest link rate supported by both source and sink.
 * Falls back to 162000 (RBR, in 10 kHz units) if there is no common rate.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	/* rates[] is zero-initialized and filled in ascending order, so
	 * rate_to_index(0, rates) finds the count of valid entries and
	 * the entry before it is the maximum common rate. */
	return rates[rate_to_index(0, rates) - 1];
}
1478
 
1479
/* Index of 'rate' in the sink's rate table, suitable for programming the
 * sink's LINK_RATE_SET selector. */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1483
 
6937 serge 1484
/*
 * Translate a link clock into the pair of values used to program the
 * sink: either a legacy link-bw code (*link_bw) or, for sinks exposing an
 * explicit rate table, a rate-select index (*rate_select). Exactly one of
 * the two outputs is meaningful; the other is set to 0.
 */
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   uint8_t *link_bw, uint8_t *rate_select)
{
	if (!intel_dp->num_sink_rates) {
		/* Legacy sink: use the bw code, no rate select. */
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
		return;
	}

	/* Rate-table sink: bw code must be zero, select by table index. */
	*link_bw = 0;
	*rate_select =
		intel_dp_rate_select(intel_dp, port_clock);
}
1496
 
3243 Serge 1497
/*
 * Compute the full DP link configuration for this pipe: pick bpp, lane
 * count and link clock that carry the mode, derive m/n values and select
 * the PLL. Returns true on success, false when the mode can't be carried
 * by any common rate/lane/bpp combination (or uses double-clock flags).
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	/* eDP: force the panel's fixed mode and set up panel fitting. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			/* NOTE(review): a non-zero int error is returned
			 * through a bool here; callers see it as true. */
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH_DISPLAY(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	/* Search from highest bpp (6 bpc minimum, stepping 2 bpc) for the
	 * first clock/lane combination whose bandwidth fits the mode. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
	} else {
		pipe_config->limited_color_range =
			intel_dp->limited_color_range;
	}

	pipe_config->lane_count = lane_count;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
			      &link_bw, &rate_select);

	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
		      link_bw, rate_select, pipe_config->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Second m/n set for seamless downclocked refresh (DRRS). */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	/* Platform-specific PLL selection. */
	if ((IS_SKYLAKE(dev)  || IS_KABYLAKE(dev)) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config);
	else
		intel_dp_set_clock(encoder, pipe_config);

	return true;
}
1660
 
6084 serge 1661
/* Latch the negotiated link rate and lane count from the pipe config
 * into the intel_dp state used by link training. */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1667
 
5060 serge 1668
/*
 * Build the DP port register value (intel_dp->DP) for the current mode:
 * voltage/pre-emphasis defaults, lane count, sync polarity, enhanced
 * framing and pipe selection, laid out per the platform's register
 * format. The value is only computed here, not yet written to hardware.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: CPT-style link training bits, pipe in bit 29. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT PCH: enhanced framing lives in TRANS_DP_CTL instead. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU / VLV / CHV register layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
2327 Serge 1749
 
5060 serge 1750
/* (mask, value) pairs for wait_panel_status(), matched against the panel
 * power sequencer status register: panel fully on, fully off, and idle
 * after a power cycle, respectively. */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
2342 Serge 1758
 
5060 serge 1759
/*
 * Poll the panel power sequencer status register until
 * (status & mask) == value, or log an error after a 5 s timeout.
 * Must be called with pps_mutex held.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000 ms timeout, polling every 10 ms. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1785
 
5060 serge 1786
/* Block until the power sequencer reports the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1791
 
5060 serge 1792
/* Block until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1797
 
5060 serge 1798
/*
 * Enforce the panel's minimum off time (T11/T12) between a power-off and
 * the next power-on, then wait for the sequencer to reach its idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1818
 
5060 serge 1819
/* Honour the panel's required delay between power-on and backlight on. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
2342 Serge 1824
 
5060 serge 1825
/* Honour the panel's required delay after backlight off before further
 * panel power changes. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1830
 
2342 Serge 1831
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	/* Caller must hold pps_mutex (panel power sequencer state). */
	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/* BXT has no register-unlock key; everywhere else replace the lock
	 * field with the unlock magic so subsequent writes take effect. */
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1850
 
5354 serge 1851
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on so the AUX channel can be used before full panel
 * power-up. Returns true if this call actually turned VDD on (i.e. the
 * caller owes a matching vdd-off), false if VDD was already requested.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	/* Sampled before we set want_panel_vdd: tells the caller whether
	 * this invocation is the one that owes a disable. */
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* Cancel any pending deferred vdd-off; we want VDD held now. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* Hardware already has VDD up; nothing more to program. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	/* Respect the panel's power-cycle (off->on) delay first. */
	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
2327 Serge 1908
 
5354 serge 1909
/*
1910
 * Must be paired with intel_edp_panel_vdd_off() or
1911
 * intel_edp_panel_off().
1912
 * Nested calls to these functions are not allowed since
1913
 * we drop the lock. Caller must use some higher level
1914
 * locking to prevent nested calls from other threads.
1915
 */
5060 serge 1916
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2330 Serge 1917
{
5354 serge 1918
	bool vdd;
5060 serge 1919
 
5354 serge 1920
	if (!is_edp(intel_dp))
1921
		return;
1922
 
1923
	pps_lock(intel_dp);
1924
	vdd = edp_panel_vdd_on(intel_dp);
1925
	pps_unlock(intel_dp);
1926
 
6084 serge 1927
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
5354 serge 1928
	     port_name(dp_to_dig_port(intel_dp)->port));
5060 serge 1929
}
1930
 
1931
/*
 * Immediately drop the forced panel VDD. Must be called with pps_mutex
 * held and only when no one wants VDD anymore (want_panel_vdd false).
 * Also releases the AUX power domain reference taken when VDD was
 * enabled.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Caller should have cleared the request before syncing off. */
	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* If panel power was also off, record when the panel fully lost
	 * power so the next power-cycle delay can be honoured. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get();

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2327 Serge 1971
 
5060 serge 1972
/*
 * Delayed-work callback that drops panel VDD once nobody has re-requested
 * it in the meantime. Takes pps_lock itself since it runs asynchronously.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	/* Only power down if a new VDD request hasn't arrived since the
	 * work was scheduled. */
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
2342 Serge 1982
 
5060 serge 1983
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2330 Serge 1984
{
5060 serge 1985
	unsigned long delay;
1986
 
1987
	/*
1988
	 * Queue the timer to fire a long time from now (relative to the power
1989
	 * down delay) to keep the panel power up across a sequence of
1990
	 * operations.
1991
	 */
1992
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
6320 serge 1993
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
5060 serge 1994
}
1995
 
5354 serge 1996
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * @sync: if true, turn VDD off right now; otherwise schedule the
 *        deferred vdd-off work so nearby AUX traffic can reuse VDD.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Dropping a VDD reference that was never taken is a bug. */
	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
	     port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
2021
 
5354 serge 2022
/*
 * Turn the eDP panel power on via the panel power sequencer. Must be
 * called with pps_mutex held; no-op for non-eDP and when the panel is
 * already powered.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Honour the panel's mandated off->on cycle time first. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the backlight-on delay (see wait_backlight_on()). */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2327 Serge 2069
 
5354 serge 2070
/* Public wrapper: take pps_lock and power the eDP panel on. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2079
 
2080
 
2081
/*
 * Turn the eDP panel power off. Must be called with pps_mutex held and
 * with VDD currently forced on (the panel-off sequence needs it; see the
 * WARN below). Releases the AUX power domain reference taken by
 * edp_panel_vdd_on().
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD is dropped as part of this write, so clear the request. */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Record power-off time for the next power-cycle delay. */
	intel_dp->panel_power_off_time = ktime_get();
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2327 Serge 2122
 
5354 serge 2123
/* Public wrapper: take pps_lock and power the eDP panel off. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2132
 
2133
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	/* Set the backlight-enable bit in PP_CONTROL. */
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2327 Serge 2162
 
5354 serge 2163
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the panel power control backlight enable. */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2174
 
2175
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	/* Clear the backlight-enable bit in PP_CONTROL. */
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record the off time and honour the backlight-off delay before
	 * returning, so follow-up panel operations are safe. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
5060 serge 2201
 
5354 serge 2202
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Reverse order of enable: PP control backlight first, then PWM. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
2327 Serge 2213
 
5354 serge 2214
/*
2215
 * Hook for controlling the panel power control backlight through the bl_power
2216
 * sysfs attribute. Take care to handle multiple calls.
2217
 */
2218
static void intel_edp_backlight_power(struct intel_connector *connector,
2219
				      bool enable)
2220
{
2221
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2222
	bool is_enabled;
2223
 
2224
	pps_lock(intel_dp);
2225
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2226
	pps_unlock(intel_dp);
2227
 
2228
	if (is_enabled == enable)
2229
		return;
2230
 
2231
	DRM_DEBUG_KMS("panel power control backlight %s\n",
2232
		      enable ? "enable" : "disable");
2233
 
2234
	if (enable)
2235
		_intel_edp_backlight_on(intel_dp);
2236
	else
2237
		_intel_edp_backlight_off(intel_dp);
2238
}
2239
 
6937 serge 2240
/*
 * Warn if the DP port enable bit does not match the expected @state.
 * Used as a state assertion around PLL and port sequencing.
 */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2252
 
2253
/*
 * Warn if the eDP PLL enable bit in DP_A does not match the expected
 * @state.
 */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2263
 
3031 serge 2264
/*
 * Enable the eDP PLL on port A. The pipe and port must still be disabled
 * and the PLL off (asserted below). Programs the PLL frequency first,
 * then enables the PLL in a second write, with settle delays after each.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      crtc->config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	/* Write the frequency selection before enabling the PLL. */
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2327 Serge 2294
 
3031 serge 2295
/*
 * Disable the eDP PLL on port A. The pipe and port must already be
 * disabled, and the PLL must currently be enabled (asserted below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2327 Serge 2313
 
2330 Serge 2314
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Any non-ON DPMS mode puts the sink into D3. */
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			/* drm_dp_dpcd_writeb() returns the byte count (1)
			 * on success. */
			if (ret == 1)
				break;
			msleep(1);
		}
	}

	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2327 Serge 2344
 
3031 serge 2345
/*
 * Read back whether this DP encoder is currently enabled and, if so,
 * which pipe drives it. Returns false when the port's power domain is
 * off or the port enable bit is clear; on success *pipe is filled in.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;
	bool ret;

	/* Bail out (without blocking) if the domain is powered down. */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		goto out;

	/* Pipe encoding in the port register differs per platform. */
	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the mapping lives in the transcoder registers
		 * instead; scan for the transcoder routed to this port. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				ret = true;

				goto out;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
		/* NOTE(review): if no transcoder matched, we still fall
		 * through to "ret = true" below with *pipe unset — looks
		 * intentional-by-omission upstream, but verify. */
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
2327 Serge 2397
 
4104 Serge 2398
/*
 * Read the current hardware state of this DP encoder back into
 * @pipe_config: sync polarity flags, audio enable, color range, lane
 * count, link M/N values, port clock and the derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT the sync polarities live in the transcoder register,
	 * elsewhere they are in the port register itself. */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link rate comes from the eDP PLL frequency select. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2485
 
3031 serge 2486
/*
 * Encoder disable hook: tear down audio/PSR, then shut the panel down in
 * the required order (backlight -> sink DPMS off -> panel power off),
 * holding VDD across the sequence.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2330 Serge 2509
 
5354 serge 2510
/*
 * ILK+ post-disable hook: take the link down after the pipe is off, and
 * shut down the dedicated eDP PLL for port A.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
2521
 
2522
/* VLV post-disable hook: just drop the link after the pipe is off. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2528
 
6084 serge 2529
/*
 * Assert (@reset true) or deassert the CHV PHY data-lane and clock-lane
 * soft resets via sideband DPIO writes. Lanes 2/3 (the PCS23 registers)
 * are only touched when more than two lanes are configured.
 * NOTE(review): caller appears responsible for holding sb_lock around
 * the vlv_dpio accesses (see chv_post_disable_dp) — confirm.
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	/* Lane 0/1 TX reset. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	/* Lane 2/3 TX reset, only when those lanes are in use. */
	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	/* Lane 0/1 clock soft reset. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	/* Lane 2/3 clock soft reset. */
	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
5060 serge 2572
 
6084 serge 2573
/*
 * CHV post-disable hook: drop the link, then assert the PHY data-lane
 * reset under the sideband lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	mutex_unlock(&dev_priv->sb_lock);
}
2588
 
5354 serge 2589
/*
 * Program the requested link-training pattern (and scrambling disable)
 * into either DP_TP_CTL (DDI platforms) or the port register value *DP
 * (pre-DDI). @dp_train_pat combines a DP_TRAINING_PATTERN_* value with
 * the optional DP_LINK_SCRAMBLING_DISABLE flag. For non-DDI paths only
 * *DP is updated; the caller writes it to the hardware.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		/* DDI: pattern select lives in DP_TP_CTL, written here. */
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT-style encoding in the port register. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* No pattern-3 encoding here; fall back to 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* g4x/VLV/CHV-style encoding in the port register. */
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2672
 
2673
/*
 * Enable the DP port with training pattern 1 selected. Uses two writes:
 * first the register contents without DP_PORT_EN, then a second write
 * that sets the enable bit (required on VLV/CHV; see comment below).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2700
 
3031 serge 2701
/*
 * Common DP enable path: bring up the port, run the panel power
 * sequence (eDP), train the link and enable audio.
 *
 * The ordering here is deliberate: PPS setup and port enable happen
 * under pps_lock, link training only after the panel is powered.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = crtc->pipe;

	/* The port must still be disabled when we get here. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* VLV/CHV: bind a panel power sequencer to this pipe first. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	/*
	 * We get an occasional spurious underrun between the port
	 * enable and vdd enable, when enabling port A eDP.
	 *
	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
	 */
	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_dp_enable_port(intel_dp);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * Underrun reporting for the other pipe was disabled in
		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
		 * enabled, so it's now safe to re-enable underrun reporting.
		 */
		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
	}

	/* eDP panel power-on sequence (no-ops for external DP). */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		/* CHV only powers up the lanes actually in use. */
		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	/* Wake the sink, then train the link. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder);
	}
}
2770
 
2771
static void g4x_enable_dp(struct intel_encoder *encoder)
2772
{
2773
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2774
 
2775
	intel_enable_dp(encoder);
5060 serge 2776
	intel_edp_backlight_on(intel_dp);
2330 Serge 2777
}
2778
 
4104 Serge 2779
static void vlv_enable_dp(struct intel_encoder *encoder)
2780
{
4560 Serge 2781
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2782
 
5060 serge 2783
	intel_edp_backlight_on(intel_dp);
6084 serge 2784
	intel_psr_enable(intel_dp);
4104 Serge 2785
}
2786
 
4560 Serge 2787
/*
 * g4x/ilk pre-enable hook: program the port register and, for CPU eDP
 * (port A), turn on the eDP PLL with FIFO underrun reporting muted.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;

	intel_dp_prepare(encoder);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * We get FIFO underruns on the other pipe when
		 * enabling the CPU eDP PLL, and when enabling CPU
		 * eDP port. We could potentially avoid the PLL
		 * underrun with a vblank wait just prior to enabling
		 * the PLL, but that doesn't appear to help the port
		 * enable case. Just sweep it all under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
	}

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp);
}
3031 serge 2813
 
5354 serge 2814
/*
 * Disconnect the power sequencer currently bound to this eDP port:
 * sync VDD off, clear the sequencer's port select, and mark the port
 * as owning no sequencer.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* VDD must be off before we hand the sequencer over. */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2839
 
2840
/*
 * Detach pipe @pipe's power sequencer from whichever eDP port currently
 * owns it, so a new port can claim it. Caller holds pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* VLV/CHV power sequencers exist only on pipes A and B. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;
		enum port port;

		/* Only eDP ports ever own a power sequencer. */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* Stealing from an *active* port would be a bookkeeping bug. */
		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2875
 
2876
/*
 * Bind the power sequencer of this port's pipe to the port and program
 * it. No-op for non-eDP or when already bound. Caller holds pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only eDP needs a panel power sequencer. */
	if (!is_edp(intel_dp))
		return;

	/* Already using this pipe's sequencer - nothing to do. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2916
 
4104 Serge 2917
/*
 * VLV pre-enable hook: program the PHY PCS registers via the sideband
 * (DPIO) interface, then enable the port through the common path.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the value read here is immediately overwritten by
	 * "val = 0" below, so only the read access itself (if anything)
	 * has an effect - looks like dead code; confirm against upstream.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);	/* pipe B select, presumably - TODO confirm bit name */
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
4104 Serge 2945
 
4560 Serge 2946
/*
 * VLV pre-PLL-enable hook: prepare the port register and put the PHY
 * Tx lanes into their default reset state via DPIO.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	/*
	 * NOTE(review): the shift term below was garbled in this copy of
	 * the file ("(1<"); reconstructed from the upstream kernel source.
	 */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
				 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2975
 
5060 serge 2976
/*
 * CHV pre-enable hook: configure the PHY (FIFO reset source, lane
 * latency, data lane stagger) through DPIO, release the data lane
 * reset, then enable the port via the common path.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	/* Second PCS group only matters when more than 2 lanes are used. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		/* Set the upar bit */
		if (intel_crtc->config->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming - value scales with port clock. */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (intel_crtc->config->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}
3064
 
3065
/*
 * CHV pre-PLL-enable hook: power up the needed PHY lanes, assert the
 * data lane reset, and program clock distribution/usage so the PLL
 * becomes accessible.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
	u32 val;

	intel_dp_prepare(encoder);

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
3147
 
6084 serge 3148
/*
 * CHV post-PLL-disable hook: tear down left/right clock distribution
 * and drop the PHY lane power overrides installed at pre-PLL-enable.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
3180
 
2330 Serge 3181
/*
3182
 * Native read with retry for link status and receiver capability reads for
3183
 * cases where the sink may still be asleep.
5060 serge 3184
 *
3185
 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3186
 * supposed to retry 3 times per the spec.
2330 Serge 3187
 */
5060 serge 3188
static ssize_t
3189
intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3190
			void *buffer, size_t size)
2330 Serge 3191
{
5060 serge 3192
	ssize_t ret;
3193
	int i;
2330 Serge 3194
 
5354 serge 3195
	/*
3196
	 * Sometime we just get the same incorrect byte repeated
3197
	 * over the entire buffer. Doing just one throw away read
3198
	 * initially seems to "solve" it.
3199
	 */
3200
	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3201
 
2330 Serge 3202
	for (i = 0; i < 3; i++) {
5060 serge 3203
		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3204
		if (ret == size)
3205
			return ret;
2330 Serge 3206
		msleep(1);
3207
	}
3208
 
5060 serge 3209
	return ret;
2330 Serge 3210
}
3211
 
3212
/*
3213
 * Fetch AUX CH registers 0x202 - 0x207 which contain
3214
 * link status information
3215
 */
6937 serge 3216
bool
2342 Serge 3217
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2330 Serge 3218
{
5060 serge 3219
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
6084 serge 3220
				       DP_LANE0_1_STATUS,
3221
				       link_status,
5060 serge 3222
				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2330 Serge 3223
}
3224
 
5060 serge 3225
/* These are source-specific values. */
6937 serge 3226
uint8_t
2342 Serge 3227
intel_dp_voltage_max(struct intel_dp *intel_dp)
2330 Serge 3228
{
3243 Serge 3229
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
6084 serge 3230
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3231
	enum port port = dp_to_dig_port(intel_dp)->port;
2342 Serge 3232
 
6084 serge 3233
	if (IS_BROXTON(dev))
3234
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3235
	else if (INTEL_INFO(dev)->gen >= 9) {
3236
		if (dev_priv->edp_low_vswing && port == PORT_A)
3237
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5354 serge 3238
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
6937 serge 3239
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5354 serge 3240
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
4104 Serge 3241
	else if (IS_GEN7(dev) && port == PORT_A)
5354 serge 3242
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
4104 Serge 3243
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
5354 serge 3244
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2342 Serge 3245
	else
5354 serge 3246
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2342 Serge 3247
}
3248
 
6937 serge 3249
/*
 * Maximum pre-emphasis level the source can produce for the given
 * voltage swing, per platform. Higher swing levels allow less
 * pre-emphasis headroom.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3316
 
6084 serge 3317
/*
 * Translate the requested DP voltage swing / pre-emphasis combination
 * into VLV PHY register values and program them via DPIO.
 * Returns 0 because VLV does not use the DP register's signal level
 * bits (everything is done in the PHY).
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/*
	 * Look up the PHY constants for the requested pre-emphasis/swing
	 * pair; unsupported combinations bail out with 0.
	 */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Program the PHY under the sideband lock. */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3416
 
6084 serge 3417
/*
 * Only the max-swing / no-pre-emphasis training point needs the CHV
 * unique transition scale enabled.
 */
static bool chv_need_uniq_trans_scale(uint8_t train_set)
{
	uint8_t preemph = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
	uint8_t vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;

	return preemph == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
	       vswing == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
3422
 
3423
/*
 * Translate the requested DP voltage swing / pre-emphasis combination
 * into CHV PHY de-emphasis and margin values and program the PHY swing
 * calculation via DPIO. Returns 0 because the DP register's signal
 * level bits are unused on CHV.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	/* Look up de-emphasis/margin for the requested training point. */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* Second PCS group is only used when more than 2 lanes are active. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3590
 
2330 Serge 3591
static uint32_t
6084 serge 3592
gen4_signal_levels(uint8_t train_set)
2330 Serge 3593
{
3594
	uint32_t	signal_levels = 0;
3595
 
3596
	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3597
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2330 Serge 3598
	default:
3599
		signal_levels |= DP_VOLTAGE_0_4;
3600
		break;
5354 serge 3601
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2330 Serge 3602
		signal_levels |= DP_VOLTAGE_0_6;
3603
		break;
5354 serge 3604
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2330 Serge 3605
		signal_levels |= DP_VOLTAGE_0_8;
3606
		break;
5354 serge 3607
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2330 Serge 3608
		signal_levels |= DP_VOLTAGE_1_2;
3609
		break;
3610
	}
3611
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
5354 serge 3612
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
2330 Serge 3613
	default:
3614
		signal_levels |= DP_PRE_EMPHASIS_0;
3615
		break;
5354 serge 3616
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
2330 Serge 3617
		signal_levels |= DP_PRE_EMPHASIS_3_5;
3618
		break;
5354 serge 3619
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
2330 Serge 3620
		signal_levels |= DP_PRE_EMPHASIS_6;
3621
		break;
5354 serge 3622
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
2330 Serge 3623
		signal_levels |= DP_PRE_EMPHASIS_9_5;
3624
		break;
3625
	}
3626
	return signal_levels;
3627
}
3628
 
3629
/* Gen6's DP voltage swing and pre-emphasis control */
3630
static uint32_t
6084 serge 3631
gen6_edp_signal_levels(uint8_t train_set)
2330 Serge 3632
{
3633
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3634
					 DP_TRAIN_PRE_EMPHASIS_MASK);
3635
	switch (signal_levels) {
5354 serge 3636
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3637
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2330 Serge 3638
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
5354 serge 3639
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2330 Serge 3640
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
5354 serge 3641
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3642
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2330 Serge 3643
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
5354 serge 3644
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3645
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2330 Serge 3646
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
5354 serge 3647
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3648
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2330 Serge 3649
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3650
	default:
3651
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3652
			      "0x%x\n", signal_levels);
3653
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3654
	}
3655
}
3656
 
2342 Serge 3657
/* Gen7's DP voltage swing and pre-emphasis control */
3658
static uint32_t
6084 serge 3659
gen7_edp_signal_levels(uint8_t train_set)
2342 Serge 3660
{
3661
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3662
					 DP_TRAIN_PRE_EMPHASIS_MASK);
3663
	switch (signal_levels) {
5354 serge 3664
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2342 Serge 3665
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
5354 serge 3666
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2342 Serge 3667
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
5354 serge 3668
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2342 Serge 3669
		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3670
 
5354 serge 3671
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2342 Serge 3672
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
5354 serge 3673
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2342 Serge 3674
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3675
 
5354 serge 3676
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2342 Serge 3677
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
5354 serge 3678
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2342 Serge 3679
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3680
 
3681
	default:
3682
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3683
			      "0x%x\n", signal_levels);
3684
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3685
	}
3686
}
3687
 
6937 serge 3688
/*
 * Program the DP port's voltage swing / pre-emphasis to match the current
 * training request (intel_dp->train_set[0]).  The per-platform helper
 * returns the register encoding, and 'mask' selects which bits of the
 * cached DP register value are replaced before writing it out.
 */
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/*
		 * On BXT the levels are programmed entirely via the PHY
		 * helper above, so nothing is merged into the DP register.
		 */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		/* CHV/VLV program levels through sideband; mask stays 0. */
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	/* Merge the new levels into the cached port register and flush. */
	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2330 Serge 3734
 
6937 serge 3735
/*
 * Set the requested link-training pattern (dp_train_pat) in the cached DP
 * port value via the platform helper, then write it to the hardware.
 * The POSTING_READ flushes the write before training proceeds.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	/* dev_priv is consumed by the I915_WRITE/POSTING_READ macros. */
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3748
 
6937 serge 3749
/*
 * Switch a DDI port's link training state machine to "idle pattern"
 * transmission and, except for PORT_A, wait for the hardware to confirm
 * the idle pattern has been sent.  No-op on non-DDI platforms.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	/* 1 ms timeout; failure is logged but not fatal. */
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3779
 
6084 serge 3780
/*
 * Bring the (non-DDI) DP link down: put the port into the idle training
 * pattern, disable the port and audio, then apply the IBX transcoder-A
 * workaround if needed.  The exact write/flush ordering is required by
 * the hardware; do not reorder.  Updates the cached intel_dp->DP value.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI ports are torn down elsewhere; reaching here is a bug. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First drop into the idle training pattern (encoding varies). */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Then disable the port and audio output. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;
}
3848
 
3849
/*
 * Read and cache the sink's DPCD receiver capabilities, plus optional
 * feature blocks: PSR/PSR2 support (eDP), HBR2/TPS3 capability, eDP 1.4
 * intermediate link rates, and downstream-port info for branch devices.
 * Returns false if the AUX read fails or no DPCD is present.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 requires gen9+ and AUX frame sync on the sink. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* Rate table is zero-terminated; stop at the first 0. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
2330 Serge 3938
 
3031 serge 3939
static void
3940
intel_dp_probe_oui(struct intel_dp *intel_dp)
3941
{
3942
	u8 buf[3];
3943
 
3944
	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3945
		return;
3946
 
5060 serge 3947
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3031 serge 3948
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3949
			      buf[0], buf[1], buf[2]);
3950
 
5060 serge 3951
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3031 serge 3952
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3953
			      buf[0], buf[1], buf[2]);
2330 Serge 3954
}
3955
 
2342 Serge 3956
static bool
5060 serge 3957
intel_dp_probe_mst(struct intel_dp *intel_dp)
3958
{
3959
	u8 buf[1];
3960
 
3961
	if (!intel_dp->can_mst)
3962
		return false;
3963
 
3964
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3965
		return false;
3966
 
3967
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3968
		if (buf[0] & DP_MST_CAP) {
3969
			DRM_DEBUG_KMS("Sink is MST capable\n");
3970
			intel_dp->is_mst = true;
3971
		} else {
3972
			DRM_DEBUG_KMS("Sink is not MST capable\n");
3973
			intel_dp->is_mst = false;
3974
		}
3975
	}
3976
 
3977
	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3978
	return intel_dp->is_mst;
3979
}
3980
 
6084 serge 3981
/*
 * Stop sink CRC calculation: clear DP_TEST_SINK_START and poll (one vblank
 * per attempt, max 10) until the sink's CRC count drops to zero.  Always
 * re-enables IPS on the way out, since the start path disabled it.
 * Returns 0 or a negative errno (-EIO on AUX failure, -ETIMEDOUT).
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Read-modify-write to clear only the START bit. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Wait for the sink to report its CRC counter back at zero. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
4024
 
4025
/*
 * Start sink CRC calculation.  Verifies the sink supports CRC, stops any
 * calculation already in progress, disables IPS (it would perturb the
 * CRC), sets DP_TEST_SINK_START, and waits one vblank for it to take.
 * Returns 0 or a negative errno (-ENOTTY if unsupported, -EIO on AUX
 * failure).  IPS is restored on the failure path; on success it stays
 * disabled until intel_dp_sink_crc_stop().
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A previous run may still be active; stop it first. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return 0;
}
5354 serge 4059
 
6084 serge 4060
/*
 * Fetch one frame CRC from the sink into crc[0..5].  Starts the sink CRC
 * test, polls (one vblank per attempt, max 6) until the sink reports a
 * nonzero CRC count, reads the 6 CRC bytes, and always stops the test
 * before returning.  Returns 0 or a negative errno.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Wait for the sink to accumulate at least one frame CRC. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* Best-effort stop; 'ret' from above is what the caller sees. */
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4100
 
4101
static bool
2342 Serge 4102
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4103
{
5060 serge 4104
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
6084 serge 4105
				       DP_DEVICE_SERVICE_IRQ_VECTOR,
5060 serge 4106
				       sink_irq_vector, 1) == 1;
4107
}
4108
 
4109
static bool
4110
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4111
{
2342 Serge 4112
	int ret;
4113
 
5060 serge 4114
	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4115
					     DP_SINK_COUNT_ESI,
4116
					     sink_irq_vector, 14);
4117
	if (ret != 14)
2342 Serge 4118
		return false;
4119
 
4120
	return true;
4121
}
4122
 
6084 serge 4123
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
2342 Serge 4124
{
6084 serge 4125
	uint8_t test_result = DP_TEST_ACK;
4126
	return test_result;
2342 Serge 4127
}
4128
 
6084 serge 4129
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4130
{
4131
	uint8_t test_result = DP_TEST_NAK;
4132
	return test_result;
4133
}
4134
 
4135
/*
 * DP compliance EDID-read autotest (DP CTS 1.2).  If the cached EDID is
 * missing, corrupt, or the read saw too many I2C defers, request the
 * failsafe test resolution; otherwise write the last EDID block's
 * checksum back to the sink and ACK with the checksum-written flag.
 * Always marks a compliance test as active before returning.
 */
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_NAK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (!drm_dp_dpcd_write(&intel_dp->aux,
					DP_TEST_EDID_CHECKSUM,
					&block->checksum,
					1))
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance_test_active = 1;

	return test_result;
}
4180
 
4181
static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4182
{
4183
	uint8_t test_result = DP_TEST_NAK;
4184
	return test_result;
4185
}
4186
 
4187
/*
 * Service a DP compliance automated-test request: read DP_TEST_REQUEST,
 * dispatch to the matching autotest handler (recording the test type),
 * and write the handler's ACK/NAK back to DP_TEST_RESPONSE.  Unknown or
 * unreadable requests are NAKed.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		/* Still NAK the sink so it does not wait forever. */
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4232
 
5060 serge 4233
/*
 * Handle an MST sink interrupt: read the ESI block, retrain the link if
 * channel EQ has been lost, hand the ESI to the topology manager, and
 * loop (via go_again) while the sink keeps raising events, acking each
 * ESI back.  If the ESI read fails the sink is assumed gone: MST is torn
 * down and a hotplug event is sent.  Returns the topology manager's
 * result, 0 if nothing was handled, or -EINVAL when not in MST mode.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced ESI bytes; retry the AUX write up to 3 times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4289
 
2330 Serge 4290
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * Called with the connection mutex held; re-reads link status and DPCD,
 * services any sink IRQ vector, and retrains the link when channel EQ
 * has been lost or a compliance link-training test was requested.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* Nothing to do if the encoder has no active CRTC. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* if link training is requested we should perform it always */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
		(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4355
 
3031 serge 4356
/* XXX this is probably wrong for multiple downstream ports */
2330 Serge 4357
static enum drm_connector_status
4358
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4359
{
3031 serge 4360
	uint8_t *dpcd = intel_dp->dpcd;
4361
	uint8_t type;
4362
 
4363
	if (!intel_dp_get_dpcd(intel_dp))
4364
		return connector_status_disconnected;
4365
 
4366
	/* if there's no downstream port, we're done */
4367
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2330 Serge 4368
		return connector_status_connected;
3031 serge 4369
 
4370
	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4560 Serge 4371
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4372
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
3031 serge 4373
		uint8_t reg;
5060 serge 4374
 
4375
		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4376
					    ®, 1) < 0)
3031 serge 4377
			return connector_status_unknown;
5060 serge 4378
 
3031 serge 4379
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4380
					      : connector_status_disconnected;
4381
	}
4382
 
4383
	/* If no HPD, poke DDC gently */
5060 serge 4384
	if (drm_probe_ddc(&intel_dp->aux.ddc))
3031 serge 4385
		return connector_status_connected;
4386
 
4387
	/* Well we tried, say unknown for unreliable port types */
4560 Serge 4388
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
6084 serge 4389
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4560 Serge 4390
		if (type == DP_DS_PORT_TYPE_VGA ||
4391
		    type == DP_DS_PORT_TYPE_NON_EDID)
6084 serge 4392
			return connector_status_unknown;
4560 Serge 4393
	} else {
4394
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4395
			DP_DWN_STRM_PORT_TYPE_MASK;
4396
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4397
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
5060 serge 4398
			return connector_status_unknown;
4560 Serge 4399
	}
3031 serge 4400
 
4401
	/* Anything else is out of spec, warn and ignore */
4402
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2330 Serge 4403
	return connector_status_disconnected;
4404
}
4405
 
4406
static enum drm_connector_status
5354 serge 4407
edp_detect(struct intel_dp *intel_dp)
2330 Serge 4408
{
3243 Serge 4409
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2330 Serge 4410
	enum drm_connector_status status;
4411
 
6084 serge 4412
	status = intel_panel_detect(dev);
4413
	if (status == connector_status_unknown)
4414
		status = connector_status_connected;
5354 serge 4415
 
6084 serge 4416
	return status;
5354 serge 4417
}
2330 Serge 4418
 
6084 serge 4419
static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4420
				       struct intel_digital_port *port)
5354 serge 4421
{
6084 serge 4422
	u32 bit;
5354 serge 4423
 
6084 serge 4424
	switch (port->port) {
4425
	case PORT_A:
4426
		return true;
4427
	case PORT_B:
4428
		bit = SDE_PORTB_HOTPLUG;
4429
		break;
4430
	case PORT_C:
4431
		bit = SDE_PORTC_HOTPLUG;
4432
		break;
4433
	case PORT_D:
4434
		bit = SDE_PORTD_HOTPLUG;
4435
		break;
4436
	default:
4437
		MISSING_CASE(port->port);
4438
		return false;
4439
	}
3480 Serge 4440
 
6084 serge 4441
	return I915_READ(SDEISR) & bit;
2330 Serge 4442
}
4443
 
6084 serge 4444
static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4445
				       struct intel_digital_port *port)
2330 Serge 4446
{
6084 serge 4447
	u32 bit;
2330 Serge 4448
 
6084 serge 4449
	switch (port->port) {
4450
	case PORT_A:
4451
		return true;
3480 Serge 4452
	case PORT_B:
6084 serge 4453
		bit = SDE_PORTB_HOTPLUG_CPT;
2330 Serge 4454
		break;
3480 Serge 4455
	case PORT_C:
6084 serge 4456
		bit = SDE_PORTC_HOTPLUG_CPT;
2330 Serge 4457
		break;
3480 Serge 4458
	case PORT_D:
6084 serge 4459
		bit = SDE_PORTD_HOTPLUG_CPT;
2330 Serge 4460
		break;
6084 serge 4461
	case PORT_E:
4462
		bit = SDE_PORTE_HOTPLUG_SPT;
4463
		break;
2330 Serge 4464
	default:
6084 serge 4465
		MISSING_CASE(port->port);
4466
		return false;
2330 Serge 4467
	}
6084 serge 4468
 
4469
	return I915_READ(SDEISR) & bit;
4470
}
4471
 
4472
static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4473
				       struct intel_digital_port *port)
4474
{
4475
	u32 bit;
4476
 
4477
	switch (port->port) {
4478
	case PORT_B:
4479
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4480
		break;
4481
	case PORT_C:
4482
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4483
		break;
4484
	case PORT_D:
4485
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4486
		break;
4487
	default:
4488
		MISSING_CASE(port->port);
4489
		return false;
4560 Serge 4490
	}
2330 Serge 4491
 
6084 serge 4492
	return I915_READ(PORT_HOTPLUG_STAT) & bit;
5097 serge 4493
}
4494
 
6660 serge 4495
static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
7144 serge 4496
					struct intel_digital_port *port)
6084 serge 4497
{
4498
	u32 bit;
4499
 
4500
	switch (port->port) {
4501
	case PORT_B:
6660 serge 4502
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
6084 serge 4503
		break;
4504
	case PORT_C:
6660 serge 4505
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
6084 serge 4506
		break;
4507
	case PORT_D:
6660 serge 4508
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
6084 serge 4509
		break;
4510
	default:
4511
		MISSING_CASE(port->port);
4512
		return false;
4513
	}
4514
 
4515
	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4516
}
4517
 
4518
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4519
				       struct intel_digital_port *intel_dig_port)
4520
{
4521
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4522
	enum port port;
6296 serge 4523
	u32 bit;
6084 serge 4524
 
6296 serge 4525
	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4526
	switch (port) {
4527
	case PORT_A:
4528
		bit = BXT_DE_PORT_HP_DDIA;
4529
		break;
4530
	case PORT_B:
4531
		bit = BXT_DE_PORT_HP_DDIB;
4532
		break;
4533
	case PORT_C:
4534
		bit = BXT_DE_PORT_HP_DDIC;
4535
		break;
4536
	default:
4537
		MISSING_CASE(port);
4538
		return false;
4539
	}
6084 serge 4540
 
4541
	return I915_READ(GEN8_DE_PORT_ISR) & bit;
4542
}
4543
 
4544
/*
 * intel_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Dispatches to the platform-specific live-status helper.
 *
 * Return %true if @port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
					 struct intel_digital_port *port)
{
	/* IBX is checked before the generic PCH-split case so that it
	 * uses its own hotplug bits rather than the CPT/SPT ones. */
	if (HAS_PCH_IBX(dev_priv))
		return ibx_digital_port_connected(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		return cpt_digital_port_connected(dev_priv, port);
	else if (IS_BROXTON(dev_priv))
		return bxt_digital_port_connected(dev_priv, port);
	else if (IS_GM45(dev_priv))
		return gm45_digital_port_connected(dev_priv, port);
	else
		return g4x_digital_port_connected(dev_priv, port);
}
4565
 
2342 Serge 4566
static struct edid *
5354 serge 4567
intel_dp_get_edid(struct intel_dp *intel_dp)
2342 Serge 4568
{
5354 serge 4569
	struct intel_connector *intel_connector = intel_dp->attached_connector;
3243 Serge 4570
 
4571
	/* use cached edid if we have one */
4572
	if (intel_connector->edid) {
4573
		/* invalid edid */
4574
		if (IS_ERR(intel_connector->edid))
3031 serge 4575
			return NULL;
4576
 
4560 Serge 4577
		return drm_edid_duplicate(intel_connector->edid);
5354 serge 4578
	} else
4579
		return drm_get_edid(&intel_connector->base,
4580
				    &intel_dp->aux.ddc);
4581
}
3031 serge 4582
 
5354 serge 4583
static void
4584
intel_dp_set_edid(struct intel_dp *intel_dp)
4585
{
4586
	struct intel_connector *intel_connector = intel_dp->attached_connector;
4587
	struct edid *edid;
4588
 
4589
	edid = intel_dp_get_edid(intel_dp);
4590
	intel_connector->detect_edid = edid;
4591
 
4592
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4593
		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4594
	else
4595
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
2342 Serge 4596
}
4597
 
5354 serge 4598
static void
4599
intel_dp_unset_edid(struct intel_dp *intel_dp)
2342 Serge 4600
{
5354 serge 4601
	struct intel_connector *intel_connector = intel_dp->attached_connector;
2342 Serge 4602
 
5354 serge 4603
	kfree(intel_connector->detect_edid);
4604
	intel_connector->detect_edid = NULL;
3243 Serge 4605
 
5354 serge 4606
	intel_dp->has_audio = false;
4607
}
3031 serge 4608
 
2330 Serge 4609
/*
 * drm_connector_funcs.detect for DP/eDP connectors.
 *
 * Drops any stale cached EDID, probes the port (panel state for eDP,
 * live-status + DPCD otherwise), (re-)probes OUI and MST, caches a fresh
 * EDID and services any pending sink IRQs. Holds an AUX power domain
 * reference for the duration of the probe; all failure paths exit via
 * the "out" label so the reference is always released.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* AUX transactions below need the port's AUX power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_connected) {
		/* Sink went away: forget any in-progress compliance test. */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		goto out;
	}

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4696
 
5354 serge 4697
/*
 * drm_connector_funcs.force: user forced the connector state, so refresh
 * the cached EDID without doing a full detect cycle. Only re-reads the
 * EDID when the forced state is "connected".
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	/* EDID read goes over AUX, which needs its power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4722
 
4723
static int intel_dp_get_modes(struct drm_connector *connector)
4724
{
4725
	struct intel_connector *intel_connector = to_intel_connector(connector);
4726
	struct edid *edid;
4727
 
4728
	edid = intel_connector->detect_edid;
4729
	if (edid) {
4730
		int ret = intel_connector_update_modes(connector, edid);
6084 serge 4731
		if (ret)
4732
			return ret;
5354 serge 4733
	}
2330 Serge 4734
 
3243 Serge 4735
	/* if eDP has no EDID, fall back to fixed mode */
5354 serge 4736
	if (is_edp(intel_attached_dp(connector)) &&
4737
	    intel_connector->panel.fixed_mode) {
6084 serge 4738
		struct drm_display_mode *mode;
5354 serge 4739
 
4740
		mode = drm_mode_duplicate(connector->dev,
3243 Serge 4741
					  intel_connector->panel.fixed_mode);
4742
		if (mode) {
2330 Serge 4743
			drm_mode_probed_add(connector, mode);
4744
			return 1;
4745
		}
4746
	}
5354 serge 4747
 
2330 Serge 4748
	return 0;
4749
}
4750
 
3243 Serge 4751
static bool
4752
intel_dp_detect_audio(struct drm_connector *connector)
4753
{
5354 serge 4754
	bool has_audio = false;
3243 Serge 4755
	struct edid *edid;
2330 Serge 4756
 
5354 serge 4757
	edid = to_intel_connector(connector)->detect_edid;
4758
	if (edid)
3243 Serge 4759
		has_audio = drm_detect_monitor_audio(edid);
2330 Serge 4760
 
3243 Serge 4761
	return has_audio;
4762
}
2330 Serge 4763
 
4764
/*
 * drm_connector_funcs.set_property: handle the three DP connector
 * properties (force-audio, broadcast RGB range, and the eDP scaling
 * mode). A change that affects the output jumps to "done" to restore
 * the mode on the active CRTC; a no-op change returns 0 early.
 *
 * Returns 0 on success, -EINVAL for unknown properties/values.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO falls back to what the cached EDID reports. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* Only restore the mode if the effective range changed. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-run the modeset so the new property takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4851
 
4852
/*
 * drm_connector_funcs.destroy: free the cached/fixed EDIDs, tear down
 * eDP panel state, then release the DRM connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* ->edid may be a cached ERR_PTR for a bad panel; don't free that. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4870
 
3243 Serge 4871
/*
 * drm_encoder_funcs.destroy: tear down MST state and, for eDP, flush
 * the delayed VDD-off work and force VDD off before freeing the port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled do to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		/* NOTE(review): upstream unregisters a reboot notifier here;
		 * this port only clears the callback — confirm intentional. */
		if (intel_dp->edp_notifier.notifier_call) {
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4894
 
6660 serge 4895
/*
 * Suspend hook: for eDP, flush the delayed VDD-off work and make sure
 * panel VDD is really off before the system goes down.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	/* Only eDP has panel VDD state to clean up. */
	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled do to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4911
 
5354 serge 4912
/*
 * Adopt a VDD enable left behind by the BIOS: take the matching power
 * domain reference and schedule the normal delayed VDD off.
 * Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4936
 
6660 serge 4937
/*
 * drm_encoder_funcs.reset: resynchronize software state with whatever
 * the BIOS/hardware left in the DP port register and power sequencer.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/* Non-DDI platforms track the port register value in software. */
	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	/* Everything below is eDP panel power sequencer state. */
	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4961
 
2330 Serge 4962
/* Connector vtable shared by all DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4973
 
4974
/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4979
 
4980
/* Encoder vtable for the DP digital port encoder. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4984
 
6084 serge 4985
/*
 * Bottom-half handler for a hotplug pulse on a DP digital port.
 * @long_hpd distinguishes a full connect/disconnect event (long pulse)
 * from a sink IRQ (short pulse). Long pulses re-read DPCD and re-probe
 * MST; short pulses service MST/link status. A failed long-pulse probe
 * falls through to "mst_fail", which tears down stale MST state; both
 * paths release the AUX power domain via "put_power".
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* DPCD/link accesses below need the AUX power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		/* Not an MST sink: check the SST link and drop MST state. */
		if (!intel_dp_probe_mst(intel_dp)) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5062
 
6084 serge 5063
/* check the VBT to see whether the eDP is on another port */
4560 Serge 5064
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
2330 Serge 5065
{
5066
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 5067
	union child_device_config *p_child;
2330 Serge 5068
	int i;
4560 Serge 5069
	static const short port_mapping[] = {
6084 serge 5070
		[PORT_B] = DVO_PORT_DPB,
5071
		[PORT_C] = DVO_PORT_DPC,
5072
		[PORT_D] = DVO_PORT_DPD,
5073
		[PORT_E] = DVO_PORT_DPE,
4560 Serge 5074
	};
2330 Serge 5075
 
6084 serge 5076
	/*
5077
	 * eDP not supported on g4x. so bail out early just
5078
	 * for a bit extra safety in case the VBT is bonkers.
5079
	 */
5080
	if (INTEL_INFO(dev)->gen < 5)
5081
		return false;
5082
 
4560 Serge 5083
	if (port == PORT_A)
5084
		return true;
5085
 
4104 Serge 5086
	if (!dev_priv->vbt.child_dev_num)
2330 Serge 5087
		return false;
5088
 
4104 Serge 5089
	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5090
		p_child = dev_priv->vbt.child_dev + i;
2330 Serge 5091
 
4560 Serge 5092
		if (p_child->common.dvo_port == port_mapping[port] &&
5093
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5094
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
2330 Serge 5095
			return true;
5096
	}
5097
	return false;
5098
}
5099
 
5060 serge 5100
/*
 * Attach the DP connector properties (force-audio, broadcast RGB range,
 * and for eDP the panel scaling mode) and initialize their defaults.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5118
 
5060 serge 5119
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5120
{
7144 serge 5121
	intel_dp->panel_power_off_time = ktime_get();
5060 serge 5122
	intel_dp->last_power_on = jiffies;
5123
	intel_dp->last_backlight_off = jiffies;
5124
}
5125
 
3243 Serge 5126
/*
 * Read the panel power sequencing delays from the hardware registers
 * and the VBT, clamp them against the eDP spec upper limits, and cache
 * the merged result in intel_dp->pps_delays and the per-stage delay
 * fields. Runs only once (guarded by t11_t12 != 0).
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the PPS register bank for this platform. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	/* BXT has no divisor register; the cycle delay lives in PP_CONTROL. */
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from hw units (100us) to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5248
 
5249
/*
 * Program the panel power sequencer registers from the delays cached in
 * intel_dp->pps_delays, including the PP clock divisor and the port
 * select bits. Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Pick the PPS register bank for this platform. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT keeps the power cycle delay in PP_CONTROL instead. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5336
 
6084 serge 5337
/**
5338
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5339
 * @dev: DRM device
5340
 * @refresh_rate: RR to be programmed
5341
 *
5342
 * This function gets called when refresh rate (RR) has to be changed from
5343
 * one frequency to another. Switches can be between high and low RR
5344
 * supported by the panel or to any other RR based on media playback (in
5345
 * this case, RR value needs to be passed from user space).
5346
 *
5347
 * The caller of this function needs to take a lock on dev_priv->drrs.
5348
 */
5349
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5060 serge 5350
{
5351
	struct drm_i915_private *dev_priv = dev->dev_private;
5352
	struct intel_encoder *encoder;
6084 serge 5353
	struct intel_digital_port *dig_port = NULL;
5354
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5355
	struct intel_crtc_state *config = NULL;
5060 serge 5356
	struct intel_crtc *intel_crtc = NULL;
6084 serge 5357
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5060 serge 5358
 
5359
	if (refresh_rate <= 0) {
5360
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5361
		return;
5362
	}
5363
 
6084 serge 5364
	if (intel_dp == NULL) {
5365
		DRM_DEBUG_KMS("DRRS not supported.\n");
5060 serge 5366
		return;
5367
	}
5368
 
5369
	/*
6084 serge 5370
	 * FIXME: This needs proper synchronization with psr state for some
5371
	 * platforms that cannot have PSR and DRRS enabled at the same time.
5060 serge 5372
	 */
5373
 
6084 serge 5374
	dig_port = dp_to_dig_port(intel_dp);
5375
	encoder = &dig_port->base;
5376
	intel_crtc = to_intel_crtc(encoder->base.crtc);
5060 serge 5377
 
5378
	if (!intel_crtc) {
5379
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5380
		return;
5381
	}
5382
 
6084 serge 5383
	config = intel_crtc->config;
5060 serge 5384
 
6084 serge 5385
	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5060 serge 5386
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5387
		return;
5388
	}
5389
 
6084 serge 5390
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5391
			refresh_rate)
5060 serge 5392
		index = DRRS_LOW_RR;
5393
 
6084 serge 5394
	if (index == dev_priv->drrs.refresh_rate_type) {
5060 serge 5395
		DRM_DEBUG_KMS(
5396
			"DRRS requested for previously set RR...ignoring\n");
5397
		return;
5398
	}
5399
 
5400
	if (!intel_crtc->active) {
5401
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5402
		return;
5403
	}
5404
 
6084 serge 5405
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5406
		switch (index) {
5407
		case DRRS_HIGH_RR:
5408
			intel_dp_set_m_n(intel_crtc, M1_N1);
5409
			break;
5410
		case DRRS_LOW_RR:
5411
			intel_dp_set_m_n(intel_crtc, M2_N2);
5412
			break;
5413
		case DRRS_MAX_RR:
5414
		default:
5415
			DRM_ERROR("Unsupported refreshrate type\n");
5416
		}
5417
	} else if (INTEL_INFO(dev)->gen > 6) {
6937 serge 5418
		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
6084 serge 5419
		u32 val;
5420
 
5060 serge 5421
		val = I915_READ(reg);
5422
		if (index > DRRS_HIGH_RR) {
6937 serge 5423
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6084 serge 5424
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5425
			else
5426
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5060 serge 5427
		} else {
6937 serge 5428
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6084 serge 5429
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5430
			else
5431
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5060 serge 5432
		}
5433
		I915_WRITE(reg, val);
5434
	}
5435
 
6084 serge 5436
	dev_priv->drrs.refresh_rate_type = index;
5437
 
5438
	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5439
}
5440
 
5441
/**
5442
 * intel_edp_drrs_enable - init drrs struct if supported
5443
 * @intel_dp: DP struct
5444
 *
5445
 * Initializes frontbuffer_bits and drrs.dp
5446
 */
5447
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5448
{
5449
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5450
	struct drm_i915_private *dev_priv = dev->dev_private;
5451
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5452
	struct drm_crtc *crtc = dig_port->base.base.crtc;
5453
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5454
 
5455
	if (!intel_crtc->config->has_drrs) {
5456
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5457
		return;
5458
	}
5459
 
5460
	mutex_lock(&dev_priv->drrs.mutex);
5461
	if (WARN_ON(dev_priv->drrs.dp)) {
5462
		DRM_ERROR("DRRS already enabled\n");
5463
		goto unlock;
5464
	}
5465
 
5466
	dev_priv->drrs.busy_frontbuffer_bits = 0;
5467
 
5468
	dev_priv->drrs.dp = intel_dp;
5469
 
5470
unlock:
5471
	mutex_unlock(&dev_priv->drrs.mutex);
5472
}
5473
 
5474
/**
5475
 * intel_edp_drrs_disable - Disable DRRS
5476
 * @intel_dp: DP struct
5477
 *
5478
 */
5479
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5480
{
5481
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5482
	struct drm_i915_private *dev_priv = dev->dev_private;
5483
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5484
	struct drm_crtc *crtc = dig_port->base.base.crtc;
5485
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5486
 
5487
	if (!intel_crtc->config->has_drrs)
5488
		return;
5489
 
5490
	mutex_lock(&dev_priv->drrs.mutex);
5491
	if (!dev_priv->drrs.dp) {
5492
		mutex_unlock(&dev_priv->drrs.mutex);
5493
		return;
5494
	}
5495
 
5496
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5497
		intel_dp_set_drrs_state(dev_priv->dev,
5498
			intel_dp->attached_connector->panel.
5499
			fixed_mode->vrefresh);
5500
 
5501
	dev_priv->drrs.dp = NULL;
5502
	mutex_unlock(&dev_priv->drrs.mutex);
5503
 
7144 serge 5504
	cancel_delayed_work_sync(&dev_priv->drrs.work);
6084 serge 5505
}
5506
 
5507
static void intel_edp_drrs_downclock_work(struct work_struct *work)
5508
{
5509
	struct drm_i915_private *dev_priv =
5510
		container_of(work, typeof(*dev_priv), drrs.work.work);
5511
	struct intel_dp *intel_dp;
5512
 
5513
	mutex_lock(&dev_priv->drrs.mutex);
5514
 
5515
	intel_dp = dev_priv->drrs.dp;
5516
 
5517
	if (!intel_dp)
5518
		goto unlock;
5519
 
5060 serge 5520
	/*
6084 serge 5521
	 * The delayed work can race with an invalidate hence we need to
5522
	 * recheck.
5060 serge 5523
	 */
5524
 
6084 serge 5525
	if (dev_priv->drrs.busy_frontbuffer_bits)
5526
		goto unlock;
5060 serge 5527
 
6084 serge 5528
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5529
		intel_dp_set_drrs_state(dev_priv->dev,
5530
			intel_dp->attached_connector->panel.
5531
			downclock_mode->vrefresh);
5060 serge 5532
 
6084 serge 5533
unlock:
5534
	mutex_unlock(&dev_priv->drrs.mutex);
5535
}
5060 serge 5536
 
6084 serge 5537
/**
5538
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5539
 * @dev: DRM device
5540
 * @frontbuffer_bits: frontbuffer plane tracking bits
5541
 *
5542
 * This function gets called everytime rendering on the given planes start.
5543
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5544
 *
5545
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5546
 */
5547
void intel_edp_drrs_invalidate(struct drm_device *dev,
5548
		unsigned frontbuffer_bits)
5549
{
5550
	struct drm_i915_private *dev_priv = dev->dev_private;
5551
	struct drm_crtc *crtc;
5552
	enum pipe pipe;
5553
 
5554
	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5555
		return;
5556
 
7144 serge 5557
	cancel_delayed_work(&dev_priv->drrs.work);
6084 serge 5558
 
5559
	mutex_lock(&dev_priv->drrs.mutex);
5560
	if (!dev_priv->drrs.dp) {
5561
		mutex_unlock(&dev_priv->drrs.mutex);
5562
		return;
5563
	}
5564
 
5565
	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5566
	pipe = to_intel_crtc(crtc)->pipe;
5567
 
5568
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5569
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5570
 
5571
	/* invalidate means busy screen hence upclock */
5572
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5573
		intel_dp_set_drrs_state(dev_priv->dev,
5574
				dev_priv->drrs.dp->attached_connector->panel.
5575
				fixed_mode->vrefresh);
5576
 
5577
	mutex_unlock(&dev_priv->drrs.mutex);
5060 serge 5578
}
5579
 
6084 serge 5580
/**
5581
 * intel_edp_drrs_flush - Restart Idleness DRRS
5582
 * @dev: DRM device
5583
 * @frontbuffer_bits: frontbuffer plane tracking bits
5584
 *
5585
 * This function gets called every time rendering on the given planes has
5586
 * completed or flip on a crtc is completed. So DRRS should be upclocked
5587
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5588
 * if no other planes are dirty.
5589
 *
5590
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5591
 */
5592
void intel_edp_drrs_flush(struct drm_device *dev,
5593
		unsigned frontbuffer_bits)
5594
{
5595
	struct drm_i915_private *dev_priv = dev->dev_private;
5596
	struct drm_crtc *crtc;
5597
	enum pipe pipe;
5598
 
5599
	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5600
		return;
5601
 
7144 serge 5602
	cancel_delayed_work(&dev_priv->drrs.work);
6084 serge 5603
 
5604
	mutex_lock(&dev_priv->drrs.mutex);
5605
	if (!dev_priv->drrs.dp) {
5606
		mutex_unlock(&dev_priv->drrs.mutex);
5607
		return;
5608
	}
5609
 
5610
	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5611
	pipe = to_intel_crtc(crtc)->pipe;
5612
 
5613
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5614
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5615
 
5616
	/* flush means busy screen hence upclock */
5617
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5618
		intel_dp_set_drrs_state(dev_priv->dev,
5619
				dev_priv->drrs.dp->attached_connector->panel.
5620
				fixed_mode->vrefresh);
5621
 
6320 serge 5622
	/*
5623
	 * flush also means no more activity hence schedule downclock, if all
5624
	 * other fbs are quiescent too
5625
	 */
5626
	if (!dev_priv->drrs.busy_frontbuffer_bits)
5627
		schedule_delayed_work(&dev_priv->drrs.work,
5628
				msecs_to_jiffies(1000));
6084 serge 5629
	mutex_unlock(&dev_priv->drrs.mutex);
5630
}
5631
 
5632
/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
 * dynamically, based on the usage scenario. This feature is applicable
 * for internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of 2 types - static and seamless.
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenario.
 * Seamless DRRS involves changing RR without any visual effect to the user
 * and can be used during normal system usage. This is done by programming
 * certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to low RR based on usage scenarios.
 *
 * eDP DRRS:-
 *        The implementation is based on frontbuffer tracking implementation.
 * When there is a disturbance on the screen triggered by user activity or a
 * periodic system activity, DRRS is disabled (RR is changed to high RR).
 * When there is no movement on screen, after a timeout of 1 second, a switch
 * to low RR is made.
 *        For integration with frontbuffer tracking code,
 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein RR is set based on the rate
 * requested by userspace.
 */
5668
 
5669
/**
5670
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5671
 * @intel_connector: eDP connector
5672
 * @fixed_mode: preferred mode of panel
5673
 *
5674
 * This function is  called only once at driver load to initialize basic
5675
 * DRRS stuff.
5676
 *
5677
 * Returns:
5678
 * Downclock mode if panel supports it, else return NULL.
5679
 * DRRS support is determined by the presence of downclock mode (apart
5680
 * from VBT setting).
5681
 */
5060 serge 5682
static struct drm_display_mode *
6084 serge 5683
intel_dp_drrs_init(struct intel_connector *intel_connector,
5684
		struct drm_display_mode *fixed_mode)
5060 serge 5685
{
5686
	struct drm_connector *connector = &intel_connector->base;
6084 serge 5687
	struct drm_device *dev = connector->dev;
5060 serge 5688
	struct drm_i915_private *dev_priv = dev->dev_private;
5689
	struct drm_display_mode *downclock_mode = NULL;
5690
 
6084 serge 5691
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5692
	mutex_init(&dev_priv->drrs.mutex);
5693
 
5060 serge 5694
	if (INTEL_INFO(dev)->gen <= 6) {
5695
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5696
		return NULL;
5697
	}
5698
 
5699
	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5354 serge 5700
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5060 serge 5701
		return NULL;
5702
	}
5703
 
5704
	downclock_mode = intel_find_panel_downclock
5705
					(dev, fixed_mode, connector);
5706
 
5707
	if (!downclock_mode) {
6084 serge 5708
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5060 serge 5709
		return NULL;
5710
	}
5711
 
6084 serge 5712
	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5060 serge 5713
 
6084 serge 5714
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5354 serge 5715
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5060 serge 5716
	return downclock_mode;
5717
}
5718
 
4104 Serge 5719
/*
 * Probe and finish setting up an eDP connector: sanitize the panel power
 * sequencer, read DPCD/EDID, pick the panel's fixed (and optional DRRS
 * downclock) mode, and set up the backlight. Returns false if the panel
 * looks like a "ghost" (no DPCD), true otherwise (including non-eDP).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	/* Nothing to do for external DP connectors. */
	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID present but unusable: record it as an error. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* May be a valid pointer or an ERR_PTR() error code. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
//		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
//		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5825
 
5826
/*
 * Initialize the DRM connector for a DP/eDP digital port: install the
 * per-platform AUX vfuncs, create and register the connector, set up the
 * hotplug pin, the panel power sequencer (eDP), AUX channel and MST.
 * Returns false on failure, after unwinding everything it set up.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type, ret;

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	if (HAS_DDI(dev))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* Early Broxton steppings route port B HPD to pin A. */
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	ret = intel_dp_aux_init(intel_dp, intel_connector);
	if (ret)
		goto fail;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;

fail:
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);
	}
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);

	return false;
}
5989
 
5990
/*
 * Allocate and wire up a DP digital port on @output_reg/@port: encoder,
 * per-platform enable/disable hooks, crtc mask and the connector itself.
 * Failures are silent (the port is simply not created); the goto ladder
 * unwinds allocations in reverse order.
 */
void
intel_dp_init(struct drm_device *dev,
	      i915_reg_t output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			     DRM_MODE_ENCODER_TMDS, NULL))
		goto err_encoder_init;

	/* Common hooks, then platform-specific enable/disable sequences. */
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	if (IS_CHERRYVIEW(dev)) {
		/* CHV: port D lives on pipe C only; B/C on pipes A and B. */
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}
5060 serge 6070
 
6071
void intel_dp_mst_suspend(struct drm_device *dev)
6072
{
6073
	struct drm_i915_private *dev_priv = dev->dev_private;
6074
	int i;
6075
 
6076
	/* disable MST */
6077
	for (i = 0; i < I915_MAX_PORTS; i++) {
6084 serge 6078
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5060 serge 6079
		if (!intel_dig_port)
6080
			continue;
6081
 
6082
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6083
			if (!intel_dig_port->dp.can_mst)
6084
				continue;
6085
			if (intel_dig_port->dp.is_mst)
6086
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6087
		}
6088
	}
6089
}
6090
 
6091
void intel_dp_mst_resume(struct drm_device *dev)
6092
{
6093
	struct drm_i915_private *dev_priv = dev->dev_private;
6094
	int i;
6095
 
6096
	for (i = 0; i < I915_MAX_PORTS; i++) {
6084 serge 6097
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5060 serge 6098
		if (!intel_dig_port)
6099
			continue;
6100
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6101
			int ret;
6102
 
6103
			if (!intel_dig_port->dp.can_mst)
6104
				continue;
6105
 
6106
			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6107
			if (ret != 0) {
6108
				intel_dp_check_mst_status(&intel_dig_port->dp);
6109
			}
6110
		}
6111
	}
6112
}