Subversion Repositories Kolibri OS

Rev

Rev 6084 | Rev 6296 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2008 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *    Keith Packard 
25
 *
26
 */
27
 
28
/*
 * NOTE(review): the original #include filenames were lost in extraction
 * (bare "#include" lines); reconstructed from the upstream i915 intel_dp.c
 * of this era -- verify against the repository history before committing.
 */
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
39
 
5060 serge 40
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
2327 Serge 41
 
6084 serge 42
/* Compliance test status bits  */
43
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
44
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
45
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
46
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47
 
4560 Serge 48
struct dp_link_dpll {
6084 serge 49
	int clock;
4560 Serge 50
	struct dpll dpll;
51
};
52
 
53
static const struct dp_link_dpll gen4_dpll[] = {
6084 serge 54
	{ 162000,
4560 Serge 55
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
6084 serge 56
	{ 270000,
4560 Serge 57
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
58
};
59
 
60
static const struct dp_link_dpll pch_dpll[] = {
6084 serge 61
	{ 162000,
4560 Serge 62
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
6084 serge 63
	{ 270000,
4560 Serge 64
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
65
};
66
 
67
static const struct dp_link_dpll vlv_dpll[] = {
6084 serge 68
	{ 162000,
4560 Serge 69
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
6084 serge 70
	{ 270000,
4560 Serge 71
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
72
};
73
 
5060 serge 74
/*
75
 * CHV supports eDP 1.4 that have  more link rates.
76
 * Below only provides the fixed rate but exclude variable rate.
77
 */
78
static const struct dp_link_dpll chv_dpll[] = {
79
	/*
80
	 * CHV requires to program fractional division for m2.
81
	 * m2 is stored in fixed point format using formula below
82
	 * (m2_int << 22) | m2_fraction
83
	 */
6084 serge 84
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
5060 serge 85
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
6084 serge 86
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
5060 serge 87
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
6084 serge 88
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
5060 serge 89
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
90
};
91
 
6084 serge 92
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
93
				  324000, 432000, 540000 };
94
static const int skl_rates[] = { 162000, 216000, 270000,
95
				  324000, 432000, 540000 };
96
static const int default_rates[] = { 162000, 270000, 540000 };
97
 
2327 Serge 98
/**
99
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
100
 * @intel_dp: DP struct
101
 *
102
 * If a CPU or PCH DP output is attached to an eDP panel, this function
103
 * will return true, and false otherwise.
104
 */
105
static bool is_edp(struct intel_dp *intel_dp)
106
{
3243 Serge 107
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
108
 
109
	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
2327 Serge 110
}
111
 
3243 Serge 112
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
2327 Serge 113
{
3243 Serge 114
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
115
 
116
	return intel_dig_port->base.base.dev;
2327 Serge 117
}
118
 
2330 Serge 119
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
120
{
3243 Serge 121
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2330 Serge 122
}
2327 Serge 123
 
2330 Serge 124
static void intel_dp_link_down(struct intel_dp *intel_dp);
5354 serge 125
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
5060 serge 126
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
5354 serge 127
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
128
static void vlv_steal_power_sequencer(struct drm_device *dev,
129
				      enum pipe pipe);
2330 Serge 130
 
6084 serge 131
static unsigned int intel_dp_unused_lane_mask(int lane_count)
2330 Serge 132
{
6084 serge 133
	return ~((1 << lane_count) - 1) & 0xf;
134
}
135
 
136
static int
137
intel_dp_max_link_bw(struct intel_dp  *intel_dp)
138
{
2330 Serge 139
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
2327 Serge 140
 
2330 Serge 141
	switch (max_link_bw) {
142
	case DP_LINK_BW_1_62:
143
	case DP_LINK_BW_2_7:
6084 serge 144
	case DP_LINK_BW_5_4:
2330 Serge 145
		break;
146
	default:
4104 Serge 147
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
148
		     max_link_bw);
2330 Serge 149
		max_link_bw = DP_LINK_BW_1_62;
150
		break;
151
	}
152
	return max_link_bw;
153
}
2327 Serge 154
 
5060 serge 155
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
156
{
157
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
158
	struct drm_device *dev = intel_dig_port->base.base.dev;
159
	u8 source_max, sink_max;
160
 
161
	source_max = 4;
162
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
163
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
164
		source_max = 2;
165
 
166
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
167
 
168
	return min(source_max, sink_max);
169
}
170
 
2342 Serge 171
/*
172
 * The units on the numbers in the next two are... bizarre.  Examples will
173
 * make it clearer; this one parallels an example in the eDP spec.
174
 *
175
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
176
 *
177
 *     270000 * 1 * 8 / 10 == 216000
178
 *
179
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
180
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
181
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
182
 * 119000.  At 18bpp that's 2142000 kilobits per second.
183
 *
184
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
185
 * get the result in decakilobits instead of kilobits.
186
 */
187
 
2330 Serge 188
static int
2351 Serge 189
intel_dp_link_required(int pixel_clock, int bpp)
2330 Serge 190
{
2342 Serge 191
	return (pixel_clock * bpp + 9) / 10;
2330 Serge 192
}
2327 Serge 193
 
2330 Serge 194
static int
195
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
196
{
197
	return (max_link_clock * max_lanes * 8) / 10;
198
}
2327 Serge 199
 
4560 Serge 200
static enum drm_mode_status
2330 Serge 201
intel_dp_mode_valid(struct drm_connector *connector,
202
		    struct drm_display_mode *mode)
203
{
204
	struct intel_dp *intel_dp = intel_attached_dp(connector);
3243 Serge 205
	struct intel_connector *intel_connector = to_intel_connector(connector);
206
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
3746 Serge 207
	int target_clock = mode->clock;
208
	int max_rate, mode_rate, max_lanes, max_link_clock;
2327 Serge 209
 
3243 Serge 210
	if (is_edp(intel_dp) && fixed_mode) {
211
		if (mode->hdisplay > fixed_mode->hdisplay)
2330 Serge 212
			return MODE_PANEL;
2327 Serge 213
 
3243 Serge 214
		if (mode->vdisplay > fixed_mode->vdisplay)
2330 Serge 215
			return MODE_PANEL;
3746 Serge 216
 
217
		target_clock = fixed_mode->clock;
2330 Serge 218
	}
2327 Serge 219
 
6084 serge 220
	max_link_clock = intel_dp_max_link_rate(intel_dp);
5060 serge 221
	max_lanes = intel_dp_max_lane_count(intel_dp);
3746 Serge 222
 
223
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
224
	mode_rate = intel_dp_link_required(target_clock, 18);
225
 
226
	if (mode_rate > max_rate)
2330 Serge 227
		return MODE_CLOCK_HIGH;
2327 Serge 228
 
2330 Serge 229
	if (mode->clock < 10000)
230
		return MODE_CLOCK_LOW;
231
 
3031 serge 232
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
233
		return MODE_H_ILLEGAL;
234
 
2330 Serge 235
	return MODE_OK;
236
}
237
 
5354 serge 238
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
2330 Serge 239
{
240
	int	i;
241
	uint32_t v = 0;
242
 
243
	if (src_bytes > 4)
244
		src_bytes = 4;
245
	for (i = 0; i < src_bytes; i++)
246
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
247
	return v;
248
}
249
 
6084 serge 250
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
2330 Serge 251
{
252
	int i;
253
	if (dst_bytes > 4)
254
		dst_bytes = 4;
255
	for (i = 0; i < dst_bytes; i++)
256
		dst[i] = src >> ((3-i) * 8);
257
}
258
 
4560 Serge 259
static void
260
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5354 serge 261
				    struct intel_dp *intel_dp);
4560 Serge 262
static void
263
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5354 serge 264
					      struct intel_dp *intel_dp);
4560 Serge 265
 
5354 serge 266
static void pps_lock(struct intel_dp *intel_dp)
267
{
268
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
269
	struct intel_encoder *encoder = &intel_dig_port->base;
270
	struct drm_device *dev = encoder->base.dev;
271
	struct drm_i915_private *dev_priv = dev->dev_private;
272
	enum intel_display_power_domain power_domain;
273
 
274
	/*
275
	 * See vlv_power_sequencer_reset() why we need
276
	 * a power domain reference here.
277
	 */
6084 serge 278
	power_domain = intel_display_port_aux_power_domain(encoder);
5354 serge 279
	intel_display_power_get(dev_priv, power_domain);
280
 
281
	mutex_lock(&dev_priv->pps_mutex);
282
}
283
 
284
static void pps_unlock(struct intel_dp *intel_dp)
285
{
286
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
287
	struct intel_encoder *encoder = &intel_dig_port->base;
288
	struct drm_device *dev = encoder->base.dev;
289
	struct drm_i915_private *dev_priv = dev->dev_private;
290
	enum intel_display_power_domain power_domain;
291
 
292
	mutex_unlock(&dev_priv->pps_mutex);
293
 
6084 serge 294
	power_domain = intel_display_port_aux_power_domain(encoder);
5354 serge 295
	intel_display_power_put(dev_priv, power_domain);
296
}
297
 
298
static void
299
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
300
{
301
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
302
	struct drm_device *dev = intel_dig_port->base.base.dev;
303
	struct drm_i915_private *dev_priv = dev->dev_private;
304
	enum pipe pipe = intel_dp->pps_pipe;
6084 serge 305
	bool pll_enabled, release_cl_override = false;
306
	enum dpio_phy phy = DPIO_PHY(pipe);
307
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
5354 serge 308
	uint32_t DP;
309
 
310
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
311
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
312
		 pipe_name(pipe), port_name(intel_dig_port->port)))
313
		return;
314
 
315
	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
316
		      pipe_name(pipe), port_name(intel_dig_port->port));
317
 
318
	/* Preserve the BIOS-computed detected bit. This is
319
	 * supposed to be read-only.
320
	 */
321
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
322
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
323
	DP |= DP_PORT_WIDTH(1);
324
	DP |= DP_LINK_TRAIN_PAT_1;
325
 
326
	if (IS_CHERRYVIEW(dev))
327
		DP |= DP_PIPE_SELECT_CHV(pipe);
328
	else if (pipe == PIPE_B)
329
		DP |= DP_PIPEB_SELECT;
330
 
331
	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
332
 
333
	/*
334
	 * The DPLL for the pipe must be enabled for this to work.
335
	 * So enable temporarily it if it's not already enabled.
336
	 */
6084 serge 337
	if (!pll_enabled) {
338
		release_cl_override = IS_CHERRYVIEW(dev) &&
339
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
340
 
5354 serge 341
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
342
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
6084 serge 343
	}
5354 serge 344
 
345
	/*
346
	 * Similar magic as in intel_dp_enable_port().
347
	 * We _must_ do this port enable + disable trick
348
	 * to make this power seqeuencer lock onto the port.
349
	 * Otherwise even VDD force bit won't work.
350
	 */
351
	I915_WRITE(intel_dp->output_reg, DP);
352
	POSTING_READ(intel_dp->output_reg);
353
 
354
	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
355
	POSTING_READ(intel_dp->output_reg);
356
 
357
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
358
	POSTING_READ(intel_dp->output_reg);
359
 
6084 serge 360
	if (!pll_enabled) {
5354 serge 361
		vlv_force_pll_off(dev, pipe);
6084 serge 362
 
363
		if (release_cl_override)
364
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
365
	}
5354 serge 366
}
367
 
4560 Serge 368
static enum pipe
369
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
370
{
371
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
372
	struct drm_device *dev = intel_dig_port->base.base.dev;
373
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 374
	struct intel_encoder *encoder;
375
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
4560 Serge 376
	enum pipe pipe;
377
 
5354 serge 378
	lockdep_assert_held(&dev_priv->pps_mutex);
4560 Serge 379
 
5354 serge 380
	/* We should never land here with regular DP ports */
381
	WARN_ON(!is_edp(intel_dp));
382
 
383
	if (intel_dp->pps_pipe != INVALID_PIPE)
384
		return intel_dp->pps_pipe;
385
 
386
	/*
387
	 * We don't have power sequencer currently.
388
	 * Pick one that's not used by other ports.
389
	 */
390
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
391
			    base.head) {
392
		struct intel_dp *tmp;
393
 
394
		if (encoder->type != INTEL_OUTPUT_EDP)
395
			continue;
396
 
397
		tmp = enc_to_intel_dp(&encoder->base);
398
 
399
		if (tmp->pps_pipe != INVALID_PIPE)
400
			pipes &= ~(1 << tmp->pps_pipe);
401
	}
402
 
403
	/*
404
	 * Didn't find one. This should not happen since there
405
	 * are two power sequencers and up to two eDP ports.
406
	 */
407
	if (WARN_ON(pipes == 0))
408
		pipe = PIPE_A;
409
	else
410
		pipe = ffs(pipes) - 1;
411
 
412
	vlv_steal_power_sequencer(dev, pipe);
413
	intel_dp->pps_pipe = pipe;
414
 
415
	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
416
		      pipe_name(intel_dp->pps_pipe),
417
		      port_name(intel_dig_port->port));
418
 
419
	/* init power sequencer on this pipe and port */
420
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
421
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
422
 
423
	/*
424
	 * Even vdd force doesn't work until we've made
425
	 * the power sequencer lock in on the port.
426
	 */
427
	vlv_power_sequencer_kick(intel_dp);
428
 
429
	return intel_dp->pps_pipe;
430
}
431
 
432
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
433
			       enum pipe pipe);
434
 
435
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
436
			       enum pipe pipe)
437
{
438
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
439
}
440
 
441
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
442
				enum pipe pipe)
443
{
444
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
445
}
446
 
447
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
448
			 enum pipe pipe)
449
{
450
	return true;
451
}
452
 
453
static enum pipe
454
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
455
		     enum port port,
456
		     vlv_pipe_check pipe_check)
457
{
458
	enum pipe pipe;
459
 
4560 Serge 460
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
461
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
462
			PANEL_PORT_SELECT_MASK;
5354 serge 463
 
464
		if (port_sel != PANEL_PORT_SELECT_VLV(port))
465
			continue;
466
 
467
		if (!pipe_check(dev_priv, pipe))
468
			continue;
469
 
6084 serge 470
		return pipe;
4560 Serge 471
	}
472
 
5354 serge 473
	return INVALID_PIPE;
4560 Serge 474
}
475
 
5354 serge 476
static void
477
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
478
{
479
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
480
	struct drm_device *dev = intel_dig_port->base.base.dev;
481
	struct drm_i915_private *dev_priv = dev->dev_private;
482
	enum port port = intel_dig_port->port;
483
 
484
	lockdep_assert_held(&dev_priv->pps_mutex);
485
 
486
	/* try to find a pipe with this port selected */
487
	/* first pick one where the panel is on */
488
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
489
						  vlv_pipe_has_pp_on);
490
	/* didn't find one? pick one where vdd is on */
491
	if (intel_dp->pps_pipe == INVALID_PIPE)
492
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
493
							  vlv_pipe_has_vdd_on);
494
	/* didn't find one? pick one with just the correct port */
495
	if (intel_dp->pps_pipe == INVALID_PIPE)
496
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
497
							  vlv_pipe_any);
498
 
499
	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
500
	if (intel_dp->pps_pipe == INVALID_PIPE) {
501
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
502
			      port_name(port));
503
		return;
504
	}
505
 
506
	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
507
		      port_name(port), pipe_name(intel_dp->pps_pipe));
508
 
509
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
510
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
511
}
512
 
513
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
514
{
515
	struct drm_device *dev = dev_priv->dev;
516
	struct intel_encoder *encoder;
517
 
518
	if (WARN_ON(!IS_VALLEYVIEW(dev)))
519
		return;
520
 
521
	/*
522
	 * We can't grab pps_mutex here due to deadlock with power_domain
523
	 * mutex when power_domain functions are called while holding pps_mutex.
524
	 * That also means that in order to use pps_pipe the code needs to
525
	 * hold both a power domain reference and pps_mutex, and the power domain
526
	 * reference get/put must be done while _not_ holding pps_mutex.
527
	 * pps_{lock,unlock}() do these steps in the correct order, so one
528
	 * should use them always.
529
	 */
530
 
531
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
532
		struct intel_dp *intel_dp;
533
 
534
		if (encoder->type != INTEL_OUTPUT_EDP)
535
			continue;
536
 
537
		intel_dp = enc_to_intel_dp(&encoder->base);
538
		intel_dp->pps_pipe = INVALID_PIPE;
539
	}
540
}
541
 
4560 Serge 542
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
543
{
544
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
545
 
6084 serge 546
	if (IS_BROXTON(dev))
547
		return BXT_PP_CONTROL(0);
548
	else if (HAS_PCH_SPLIT(dev))
4560 Serge 549
		return PCH_PP_CONTROL;
550
	else
551
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
552
}
553
 
554
static u32 _pp_stat_reg(struct intel_dp *intel_dp)
555
{
556
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
557
 
6084 serge 558
	if (IS_BROXTON(dev))
559
		return BXT_PP_STATUS(0);
560
	else if (HAS_PCH_SPLIT(dev))
4560 Serge 561
		return PCH_PP_STATUS;
562
	else
563
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
564
}
565
 
5354 serge 566
#if 0
567
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
568
   This function only applicable when panel PM state is not to be tracked */
569
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
570
			      void *unused)
571
{
572
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
573
						 edp_notifier);
574
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
575
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 576
 
5354 serge 577
	if (!is_edp(intel_dp) || code != SYS_RESTART)
578
		return 0;
579
 
580
	pps_lock(intel_dp);
581
 
582
	if (IS_VALLEYVIEW(dev)) {
583
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
6084 serge 584
		u32 pp_ctrl_reg, pp_div_reg;
585
		u32 pp_div;
5354 serge 586
 
587
		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
588
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
589
		pp_div = I915_READ(pp_div_reg);
590
		pp_div &= PP_REFERENCE_DIVIDER_MASK;
591
 
592
		/* 0x1F write to PP_DIV_REG sets max cycle delay */
593
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
594
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
595
		msleep(intel_dp->panel_power_cycle_delay);
596
	}
597
 
598
	pps_unlock(intel_dp);
599
 
600
	return 0;
601
}
602
#endif
603
 
5060 serge 604
static bool edp_have_panel_power(struct intel_dp *intel_dp)
2342 Serge 605
{
3243 Serge 606
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2342 Serge 607
	struct drm_i915_private *dev_priv = dev->dev_private;
608
 
5354 serge 609
	lockdep_assert_held(&dev_priv->pps_mutex);
610
 
611
	if (IS_VALLEYVIEW(dev) &&
612
	    intel_dp->pps_pipe == INVALID_PIPE)
613
		return false;
614
 
4560 Serge 615
	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
2342 Serge 616
}
617
 
5060 serge 618
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
2342 Serge 619
{
3243 Serge 620
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2342 Serge 621
	struct drm_i915_private *dev_priv = dev->dev_private;
622
 
5354 serge 623
	lockdep_assert_held(&dev_priv->pps_mutex);
624
 
625
	if (IS_VALLEYVIEW(dev) &&
626
	    intel_dp->pps_pipe == INVALID_PIPE)
627
		return false;
628
 
629
	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
2342 Serge 630
}
631
 
632
static void
633
intel_dp_check_edp(struct intel_dp *intel_dp)
634
{
3243 Serge 635
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2342 Serge 636
	struct drm_i915_private *dev_priv = dev->dev_private;
637
 
638
	if (!is_edp(intel_dp))
639
		return;
3746 Serge 640
 
5060 serge 641
	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
2342 Serge 642
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
643
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
4560 Serge 644
			      I915_READ(_pp_stat_reg(intel_dp)),
645
			      I915_READ(_pp_ctrl_reg(intel_dp)));
2342 Serge 646
	}
647
}
648
 
3480 Serge 649
static uint32_t
650
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
651
{
652
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
653
	struct drm_device *dev = intel_dig_port->base.base.dev;
654
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 655
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
3480 Serge 656
	uint32_t status;
657
	bool done;
658
 
659
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
660
	if (has_aux_irq)
661
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
4104 Serge 662
					  msecs_to_jiffies_timeout(10));
3480 Serge 663
	else
664
		done = wait_for_atomic(C, 10) == 0;
665
	if (!done)
666
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
667
			  has_aux_irq);
668
#undef C
669
 
670
	return status;
671
}
672
 
5060 serge 673
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
4104 Serge 674
{
675
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
676
	struct drm_device *dev = intel_dig_port->base.base.dev;
677
 
5060 serge 678
	/*
679
	 * The clock divider is based off the hrawclk, and would like to run at
680
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
4104 Serge 681
	 */
5060 serge 682
	return index ? 0 : intel_hrawclk(dev) / 2;
683
}
684
 
685
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
686
{
687
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
688
	struct drm_device *dev = intel_dig_port->base.base.dev;
6084 serge 689
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 690
 
691
	if (index)
692
		return 0;
693
 
694
	if (intel_dig_port->port == PORT_A) {
6084 serge 695
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
696
 
5060 serge 697
	} else {
698
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
699
	}
700
}
701
 
702
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
703
{
704
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
705
	struct drm_device *dev = intel_dig_port->base.base.dev;
706
	struct drm_i915_private *dev_priv = dev->dev_private;
707
 
708
	if (intel_dig_port->port == PORT_A) {
709
		if (index)
710
			return 0;
6084 serge 711
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
4104 Serge 712
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
713
		/* Workaround for non-ULT HSW */
714
		switch (index) {
715
		case 0: return 63;
716
		case 1: return 72;
717
		default: return 0;
718
		}
5060 serge 719
	} else  {
4104 Serge 720
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
721
	}
722
}
723
 
5060 serge 724
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
725
{
726
	return index ? 0 : 100;
727
}
728
 
5354 serge 729
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
730
{
731
	/*
732
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
733
	 * derive the clock from CDCLK automatically). We still implement the
734
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
735
	 */
736
	return index ? 0 : 1;
737
}
738
 
5060 serge 739
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
740
				      bool has_aux_irq,
741
				      int send_bytes,
742
				      uint32_t aux_clock_divider)
743
{
744
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
745
	struct drm_device *dev = intel_dig_port->base.base.dev;
746
	uint32_t precharge, timeout;
747
 
748
	if (IS_GEN6(dev))
749
		precharge = 3;
750
	else
751
		precharge = 5;
752
 
753
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
754
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
755
	else
756
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
757
 
758
	return DP_AUX_CH_CTL_SEND_BUSY |
759
	       DP_AUX_CH_CTL_DONE |
760
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
761
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
762
	       timeout |
763
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
764
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
765
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
766
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
767
}
768
 
5354 serge 769
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
770
				      bool has_aux_irq,
771
				      int send_bytes,
772
				      uint32_t unused)
773
{
774
	return DP_AUX_CH_CTL_SEND_BUSY |
775
	       DP_AUX_CH_CTL_DONE |
776
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
777
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
778
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
779
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
780
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
781
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
782
}
783
 
2330 Serge 784
static int
785
intel_dp_aux_ch(struct intel_dp *intel_dp,
5354 serge 786
		const uint8_t *send, int send_bytes,
2330 Serge 787
		uint8_t *recv, int recv_size)
788
{
3243 Serge 789
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
790
	struct drm_device *dev = intel_dig_port->base.base.dev;
2330 Serge 791
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 792
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
2330 Serge 793
	uint32_t ch_data = ch_ctl + 4;
4104 Serge 794
	uint32_t aux_clock_divider;
3480 Serge 795
	int i, ret, recv_bytes;
2330 Serge 796
	uint32_t status;
5060 serge 797
	int try, clock = 0;
798
	bool has_aux_irq = HAS_AUX_IRQ(dev);
799
	bool vdd;
2330 Serge 800
 
5354 serge 801
	pps_lock(intel_dp);
5060 serge 802
 
5354 serge 803
	/*
804
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
805
	 * In such cases we want to leave VDD enabled and it's up to upper layers
806
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
807
	 * ourselves.
808
	 */
809
	vdd = edp_panel_vdd_on(intel_dp);
810
 
3480 Serge 811
	/* dp aux is extremely sensitive to irq latency, hence request the
812
	 * lowest possible wakeup latency and so prevent the cpu from going into
813
	 * deep sleep states.
814
	 */
815
 
2342 Serge 816
	intel_dp_check_edp(intel_dp);
2330 Serge 817
 
818
	/* Try to wait for any previous AUX channel activity */
819
	for (try = 0; try < 3; try++) {
3480 Serge 820
		status = I915_READ_NOTRACE(ch_ctl);
2330 Serge 821
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
822
			break;
823
		msleep(1);
824
	}
825
 
826
	if (try == 3) {
6084 serge 827
		static u32 last_status = -1;
828
		const u32 status = I915_READ(ch_ctl);
829
 
830
		if (status != last_status) {
831
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
832
			     status);
833
			last_status = status;
834
		}
835
 
3480 Serge 836
		ret = -EBUSY;
837
		goto out;
2330 Serge 838
	}
839
 
4560 Serge 840
	/* Only 5 data registers! */
841
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
842
		ret = -E2BIG;
843
		goto out;
844
	}
845
 
5060 serge 846
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
847
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
848
							  has_aux_irq,
849
							  send_bytes,
850
							  aux_clock_divider);
851
 
6084 serge 852
		/* Must try at least 3 times according to DP spec */
853
		for (try = 0; try < 5; try++) {
854
			/* Load the send data into the aux channel data registers */
855
			for (i = 0; i < send_bytes; i += 4)
856
				I915_WRITE(ch_data + i,
5354 serge 857
					   intel_dp_pack_aux(send + i,
858
							     send_bytes - i));
2330 Serge 859
 
6084 serge 860
			/* Send the command and wait for it to complete */
5060 serge 861
			I915_WRITE(ch_ctl, send_ctl);
2330 Serge 862
 
6084 serge 863
			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
3480 Serge 864
 
6084 serge 865
			/* Clear done status and any errors */
866
			I915_WRITE(ch_ctl,
867
				   status |
868
				   DP_AUX_CH_CTL_DONE |
869
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
870
				   DP_AUX_CH_CTL_RECEIVE_ERROR);
3031 serge 871
 
6084 serge 872
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
873
				continue;
874
 
875
			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
876
			 *   400us delay required for errors and timeouts
877
			 *   Timeout errors from the HW already meet this
878
			 *   requirement so skip to next iteration
879
			 */
880
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
881
				usleep_range(400, 500);
882
				continue;
883
			}
884
			if (status & DP_AUX_CH_CTL_DONE)
885
				goto done;
886
		}
2330 Serge 887
	}
888
 
889
	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
890
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
3480 Serge 891
		ret = -EBUSY;
892
		goto out;
2330 Serge 893
	}
894
 
6084 serge 895
done:
2330 Serge 896
	/* Check for timeout or receive error.
897
	 * Timeouts occur when the sink is not connected
898
	 */
899
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
900
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
3480 Serge 901
		ret = -EIO;
902
		goto out;
2330 Serge 903
	}
904
 
905
	/* Timeouts occur when the device isn't connected, so they're
906
	 * "normal" -- don't fill the kernel log with these */
907
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
908
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
3480 Serge 909
		ret = -ETIMEDOUT;
910
		goto out;
2330 Serge 911
	}
912
 
913
	/* Unload any bytes sent back from the other side */
914
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
915
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
916
	if (recv_bytes > recv_size)
917
		recv_bytes = recv_size;
918
 
919
	for (i = 0; i < recv_bytes; i += 4)
5354 serge 920
		intel_dp_unpack_aux(I915_READ(ch_data + i),
6084 serge 921
				    recv + i, recv_bytes - i);
2330 Serge 922
 
3480 Serge 923
	ret = recv_bytes;
924
out:
925
 
5060 serge 926
	if (vdd)
927
		edp_panel_vdd_off(intel_dp, false);
928
 
5354 serge 929
	pps_unlock(intel_dp);
930
 
3480 Serge 931
	return ret;
2330 Serge 932
}
933
 
5060 serge 934
/* AUX header layout: 3 address/request bytes plus one length byte. */
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux.transfer hook: run one native-AUX or I2C-over-AUX
 * transaction described by @msg through the hardware AUX channel.
 *
 * Packs the request type and 20-bit address into the 3-byte header,
 * appends the length byte and (for writes) the payload, then decodes
 * the sink's reply nibble into msg->reply.
 *
 * Returns the number of payload bytes transferred on success, or a
 * negative error code (-E2BIG on oversized messages, -EINVAL on an
 * unknown request, or whatever intel_dp_aux_ch() returned).
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Header: request in the high nibble, 20-bit AUX address below. */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	/* AUX encodes transfer length as (length - 1). */
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size means an address-only transaction: no length byte. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* Reply code lives in the high nibble of byte 0. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* One extra byte for the reply code preceding the data. */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1005
 
5060 serge 1006
/*
 * Set up the AUX channel for this DP port: pick the AUX_CH_CTL
 * register, register the drm_dp_aux helper and create the sysfs link
 * from the connector to its DDC adapter.
 *
 * On registration failure this logs an error and returns with the aux
 * left unregistered; on sysfs-link failure the aux is unregistered
 * again.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	/* NOTE(review): second format argument is an empty string here —
	 * upstream logs the connector name; presumably stubbed for this port.
	 */
	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
					"");

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the DDC adapter under the connector's sysfs directory. */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
2330 Serge 1097
 
5060 serge 1098
/*
 * Connector unregister hook: remove the connector→DDC-adapter sysfs
 * link created in intel_dp_aux_init() (only for non-MST connectors,
 * which are the ones that got the link), then run the common connector
 * unregistration.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1108
 
5060 serge 1109
/*
 * Program the crtc state to drive eDP from DPLL0 on Skylake: clear any
 * stale DPLL hardware state and encode the link rate into the
 * DPLL_CTRL1 override bits for DPLL0.
 *
 * Note pipe_config->port_clock is twice the per-lane symbol clock, so
 * the switch matches on port_clock / 2 (kHz).
 */
static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	results in CDCLK change. Need to handle the change of CDCLK by
	disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
1154
 
6084 serge 1155
void
1156
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
2330 Serge 1157
{
6084 serge 1158
	memset(&pipe_config->dpll_hw_state, 0,
1159
	       sizeof(pipe_config->dpll_hw_state));
1160
 
1161
	switch (pipe_config->port_clock / 2) {
1162
	case 81000:
5060 serge 1163
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1164
		break;
6084 serge 1165
	case 135000:
5060 serge 1166
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1167
		break;
6084 serge 1168
	case 270000:
5060 serge 1169
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1170
		break;
1171
	}
2330 Serge 1172
}
1173
 
6084 serge 1174
static int
1175
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1176
{
1177
	if (intel_dp->num_sink_rates) {
1178
		*sink_rates = intel_dp->sink_rates;
1179
		return intel_dp->num_sink_rates;
1180
	}
1181
 
1182
	*sink_rates = default_rates;
1183
 
1184
	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1185
}
1186
 
1187
static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1188
{
1189
	/* WaDisableHBR2:skl */
1190
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1191
		return false;
1192
 
1193
	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1194
	    (INTEL_INFO(dev)->gen >= 9))
1195
		return true;
1196
	else
1197
		return false;
1198
}
1199
 
1200
static int
1201
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1202
{
1203
	int size;
1204
 
1205
	if (IS_BROXTON(dev)) {
1206
		*source_rates = bxt_rates;
1207
		size = ARRAY_SIZE(bxt_rates);
1208
	} else if (IS_SKYLAKE(dev)) {
1209
		*source_rates = skl_rates;
1210
		size = ARRAY_SIZE(skl_rates);
1211
	} else {
1212
		*source_rates = default_rates;
1213
		size = ARRAY_SIZE(default_rates);
1214
	}
1215
 
1216
	/* This depends on the fact that 5.4 is last value in the array */
1217
	if (!intel_dp_source_supports_hbr2(dev))
1218
		size--;
1219
 
1220
	return size;
1221
}
1222
 
4104 Serge 1223
/*
 * For pre-DDI platforms, look up the fixed DPLL divisor values that
 * produce the requested DP link clock and store them in the crtc
 * state.  If no table entry matches, the config is left untouched
 * (clock_set stays false).
 */
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	/* Each platform family has its own pre-computed divisor table. */
	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}
1255
 
6084 serge 1256
static int intersect_rates(const int *source_rates, int source_len,
1257
			   const int *sink_rates, int sink_len,
1258
			   int *common_rates)
1259
{
1260
	int i = 0, j = 0, k = 0;
1261
 
1262
	while (i < source_len && j < sink_len) {
1263
		if (source_rates[i] == sink_rates[j]) {
1264
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1265
				return k;
1266
			common_rates[k] = source_rates[i];
1267
			++k;
1268
			++i;
1269
			++j;
1270
		} else if (source_rates[i] < sink_rates[j]) {
1271
			++i;
1272
		} else {
1273
			++j;
1274
		}
1275
	}
1276
	return k;
1277
}
1278
 
1279
/*
 * Compute the ascending list of link rates supported by both this
 * source and the attached sink.  Fills common_rates (capacity
 * DP_MAX_SUPPORTED_RATES) and returns the number of entries.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *src_tbl, *snk_tbl;
	int src_cnt, snk_cnt;

	src_cnt = intel_dp_source_rates(dev, &src_tbl);
	snk_cnt = intel_dp_sink_rates(intel_dp, &snk_tbl);

	return intersect_rates(src_tbl, src_cnt,
			       snk_tbl, snk_cnt,
			       common_rates);
}
1293
 
1294
/*
 * Format an int array into str as "a, b, c".
 *
 * Always NUL-terminates; silently stops appending once the buffer
 * would overflow (snprintf has already written the truncated piece).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		const char *sep = (i == 0) ? "" : ", ";
		int written = snprintf(str, len, "%s%d", sep, array[i]);

		if (written >= len)
			return;

		str += written;
		len -= written;
	}
}
1309
 
1310
/*
 * Dump the source, sink and common link-rate tables to the KMS debug
 * log.  No-op unless DRM_UT_KMS debugging is enabled.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	/* Skip the string-building work entirely when not logging. */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1333
 
1334
static int rate_to_index(int find, const int *rates)
1335
{
1336
	int i = 0;
1337
 
1338
	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1339
		if (find == rates[i])
1340
			break;
1341
 
1342
	return i;
1343
}
1344
 
1345
int
1346
intel_dp_max_link_rate(struct intel_dp *intel_dp)
1347
{
1348
	int rates[DP_MAX_SUPPORTED_RATES] = {};
1349
	int len;
1350
 
1351
	len = intel_dp_common_rates(intel_dp, rates);
1352
	if (WARN_ON(len <= 0))
1353
		return 162000;
1354
 
1355
	return rates[rate_to_index(0, rates) - 1];
1356
}
1357
 
1358
/*
 * Convert a link rate (kHz) to its index in the sink's advertised rate
 * table, as programmed into DP_LINK_RATE_SET on eDP 1.4 sinks.
 * Returns DP_MAX_SUPPORTED_RATES if the rate is not in the table.
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1362
 
1363
static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1364
				  uint8_t *link_bw, uint8_t *rate_select)
1365
{
1366
	if (intel_dp->num_sink_rates) {
1367
		*link_bw = 0;
1368
		*rate_select =
1369
			intel_dp_rate_select(intel_dp, port_clock);
1370
	} else {
1371
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1372
		*rate_select = 0;
1373
	}
1374
}
1375
 
3243 Serge 1376
/*
 * Encoder compute_config hook for DP: pick bpp, lane count and link
 * rate for the requested mode, then derive M/N values and the PLL
 * configuration.
 *
 * The search walks bpp from the pipe's default down to 6 bpc, and for
 * each bpp tries increasing link rate and lane count until the mode's
 * bandwidth fits.  Returns true when a working configuration was
 * found, false otherwise.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config);
			/* NOTE(review): returns an int error code from a bool
			 * function; a negative errno converts to 'true' here.
			 * Matches upstream of this era — confirm before
			 * changing. */
			if (ret)
				return ret;
		}

		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	/* Double-clocked modes are not supported over DP. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	/* bpp loop: 24 -> 18 in steps of 2 bpc (6 per pixel). */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
	} else {
		pipe_config->limited_color_range =
			intel_dp->limited_color_range;
	}

	pipe_config->lane_count = lane_count;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
			      &link_bw, &rate_select);

	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
		      link_bw, rate_select, pipe_config->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Second M/N pair for seamless downclocking (DRRS). */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	/* Platform-specific PLL selection. */
	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config);
	else
		intel_dp_set_clock(encoder, pipe_config);

	return true;
}
1539
 
4104 Serge 1540
/*
 * Program the CPU eDP (port A) PLL frequency bits in the DP_A register
 * to match the crtc's link clock, mirroring the choice into
 * intel_dp->DP for later writes.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	/* Let the PLL settle before it is used. */
	udelay(500);
}
1570
 
6084 serge 1571
/*
 * Cache the negotiated link parameters (rate in kHz and lane count)
 * from the crtc state into the intel_dp for later use.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1577
 
5060 serge 1578
/*
 * Build the value of the DP port register (cached in intel_dp->DP) for
 * the upcoming modeset, handling the per-platform register layout
 * differences.  Nothing is written to the port register itself here
 * except the CPT TRANS_DP_CTL update.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: sync polarity lives in the port register. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT PCH ports: enhanced framing moved to TRANS_DP_CTL. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / SNB CPU / VLV-CHV layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
2327 Serge 1662
 
5060 serge 1663
/* Panel power sequencer (PP_STATUS) mask/value pairs for the three
 * states wait_panel_status() is asked to reach. */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

/*
 * Poll the panel power-sequencer status register until
 * (status & mask) == value, or a 5 second timeout elapses (the timeout
 * only logs an error, it does not propagate).  Caller must hold
 * pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000 ms total, polling every 10 ms. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1698
 
5060 serge 1699
/* Block until the power sequencer reports the panel fully on/idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1704
 
5060 serge 1705
/* Block until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1710
 
5060 serge 1711
/*
 * Honour the panel's mandatory power-cycle delay: first wait out the
 * remainder of panel_power_cycle_delay measured from the last
 * power-off, then wait for the sequencer to reach the off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1722
 
5060 serge 1723
/* Wait out the panel's power-on→backlight-on delay (T8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
2342 Serge 1728
 
5060 serge 1729
/* Wait out the panel's backlight-off delay (T9) after disabling it. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1734
 
2342 Serge 1735
/* Read the current pp_control value, unlocking the register if it
1736
 * is locked
1737
 */
1738
 
3746 Serge 1739
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

/*
 * Returns the panel power-control register value with the write-lock
 * key already replaced by the unlock value (except on Broxton, which
 * has no register lock).  Caller must hold pps_mutex.
 */
static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1754
 
5354 serge 1755
/*
1756
 * Must be paired with edp_panel_vdd_off().
1757
 * Must hold pps_mutex around the whole on/off sequence.
1758
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1759
 */
1760
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on so the AUX channel can be used while the panel
 * is off.  Returns true when this call actually enabled VDD (i.e. the
 * caller owes a matching disable), false when it was already on or
 * this is not eDP.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* Cancel any pending deferred VDD-off; we want it on now. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
2327 Serge 1812
 
5354 serge 1813
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	/* Not an eDP port: there is no panel VDD to manage. */
	if (!is_edp(intel_dp))
		return;

	/* Take the PPS mutex only around the actual VDD enable. */
	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	/*
	 * edp_panel_vdd_on() returns true when it had to enable VDD;
	 * false means someone already requested it, which this external
	 * entry point treats as a caller bug.
	 */
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
	     port_name(dp_to_dig_port(intel_dp)->port));
}
1834
 
1835
/*
 * Synchronously turn off the panel VDD force bit and drop the AUX power
 * domain reference taken when VDD was enabled.
 * Caller must hold pps_mutex; must not be called while want_panel_vdd
 * is still set (asserted below).
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nobody should still be holding a VDD request at this point. */
	WARN_ON(intel_dp->want_panel_vdd);

	/* VDD already off in hardware: nothing to do. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/*
	 * If panel power was also off, dropping VDD starts a power cycle;
	 * record the timestamp so wait_panel_power_cycle() can honor the
	 * mandatory power-cycle delay.
	 */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	/* Release the AUX power domain reference taken by edp_panel_vdd_on(). */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2327 Serge 1875
 
5060 serge 1876
/*
 * Delayed-work handler that drops panel VDD after the hold-off period,
 * unless a new VDD request arrived in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	/* Only power down if no one re-requested VDD while we were queued. */
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
2342 Serge 1886
 
5060 serge 1887
/*
 * Schedule a deferred VDD power-down instead of dropping it immediately,
 * so back-to-back AUX operations don't thrash panel power.
 */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	/*
	 * NOTE(review): the schedule_delayed_work() call is commented out in
	 * this (KolibriOS) port, so `delay` is computed but unused and VDD is
	 * never dropped asynchronously here — presumably deliberate because
	 * the port lacks workqueue scheduling; confirm before re-enabling.
	 */
//   schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
1899
 
5354 serge 1900
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Dropping VDD that was never requested indicates unbalanced calls. */
	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
	     port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	/* sync=true powers down immediately; otherwise defer via delayed work. */
	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
1925
 
5354 serge 1926
/*
 * Run the eDP panel power-on sequence on the panel power sequencer (PPS).
 * Caller must hold pps_mutex. No-op for non-eDP; warns and bails if the
 * panel is already powered.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Honor the panel's mandatory power-cycle delay before re-powering. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Block until the sequencer reports the panel as powered up. */
	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2327 Serge 1973
 
5354 serge 1974
/*
 * Public entry point: grab the PPS lock and run the eDP panel power-on
 * sequence. Does nothing for non-eDP ports.
 */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_on(intel_dp);
		pps_unlock(intel_dp);
	}
}
1983
 
1984
 
1985
/*
 * Run the eDP panel power-off sequence. Caller must hold pps_mutex and
 * must have VDD forced on (warned below), since the sequence clears both
 * panel power and VDD in one register write.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD is being dropped as part of this sequence; clear the request. */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Record power-cycle start, then wait for the sequencer to finish. */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2327 Serge 2026
 
5354 serge 2027
/*
 * Public entry point: grab the PPS lock and run the eDP panel power-off
 * sequence. Does nothing for non-eDP ports.
 */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_off(intel_dp);
		pps_unlock(intel_dp);
	}
}
2036
 
2037
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	/* Serialize the read-modify-write of the PP control register. */
	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2327 Serge 2066
 
5354 serge 2067
/* Enable backlight PWM and backlight PP control. */
2068
void intel_edp_backlight_on(struct intel_dp *intel_dp)
2330 Serge 2069
{
5354 serge 2070
	if (!is_edp(intel_dp))
2071
		return;
2072
 
2073
	DRM_DEBUG_KMS("\n");
2074
 
2075
	intel_panel_enable_backlight(intel_dp->attached_connector);
2076
	_intel_edp_backlight_on(intel_dp);
2077
}
2078
 
2079
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	/* Serialize the read-modify-write of the PP control register. */
	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/*
	 * Record when the backlight went off and honor the panel's
	 * backlight-off delay before allowing further PPS activity.
	 */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
5060 serge 2105
 
5354 serge 2106
/* Disable backlight PP control and backlight PWM. */
2107
void intel_edp_backlight_off(struct intel_dp *intel_dp)
2108
{
2109
	if (!is_edp(intel_dp))
2110
		return;
2111
 
2112
	DRM_DEBUG_KMS("\n");
2113
 
2114
	_intel_edp_backlight_off(intel_dp);
5060 serge 2115
	intel_panel_disable_backlight(intel_dp->attached_connector);
2330 Serge 2116
}
2327 Serge 2117
 
5354 serge 2118
/*
2119
 * Hook for controlling the panel power control backlight through the bl_power
2120
 * sysfs attribute. Take care to handle multiple calls.
2121
 */
2122
static void intel_edp_backlight_power(struct intel_connector *connector,
2123
				      bool enable)
2124
{
2125
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2126
	bool is_enabled;
2127
 
2128
	pps_lock(intel_dp);
2129
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2130
	pps_unlock(intel_dp);
2131
 
2132
	if (is_enabled == enable)
2133
		return;
2134
 
2135
	DRM_DEBUG_KMS("panel power control backlight %s\n",
2136
		      enable ? "enable" : "disable");
2137
 
2138
	if (enable)
2139
		_intel_edp_backlight_on(intel_dp);
2140
	else
2141
		_intel_edp_backlight_off(intel_dp);
2142
}
2143
 
3031 serge 2144
/*
 * Enable the CPU eDP PLL (DP_A register). The pipe must be disabled and
 * both the PLL and the port must currently be off (warned below).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* Let the PLL settle before anyone uses it. */
	udelay(200);
}
2327 Serge 2169
 
3031 serge 2170
/*
 * Disable the CPU eDP PLL (DP_A register). The pipe must be disabled,
 * the PLL must currently be on, and the port off (warned below).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	/* Give the PLL time to spin down cleanly. */
	udelay(200);
}
2327 Serge 2194
 
2330 Serge 2195
/* If the sink supports it, try to set the power state appropriately */
3243 Serge 2196
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2330 Serge 2197
{
2198
	int ret, i;
2327 Serge 2199
 
2330 Serge 2200
	/* Should have a valid DPCD by this point */
2201
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2202
		return;
2327 Serge 2203
 
2330 Serge 2204
	if (mode != DRM_MODE_DPMS_ON) {
5060 serge 2205
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
6084 serge 2206
					 DP_SET_POWER_D3);
2330 Serge 2207
	} else {
2208
		/*
2209
		 * When turning on, we need to retry for 1ms to give the sink
2210
		 * time to wake up.
2211
		 */
2212
		for (i = 0; i < 3; i++) {
5060 serge 2213
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
6084 serge 2214
						 DP_SET_POWER_D0);
2330 Serge 2215
			if (ret == 1)
2216
				break;
2217
			msleep(1);
2218
		}
2219
	}
5354 serge 2220
 
2221
	if (ret != 1)
2222
		DRM_DEBUG_KMS("failed to %s sink power state\n",
2223
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2330 Serge 2224
}
2327 Serge 2225
 
3031 serge 2226
/*
 * Read back whether this DP port is enabled in hardware and, if so,
 * which pipe drives it. Returns false when the power domain is off or
 * the port is disabled; otherwise fills *pipe and returns true.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/* Can't trust register reads if the port's power domain is down. */
	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	/* Pipe selection is encoded differently per platform/port. */
	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the transcoder records which port it drives. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	/*
	 * NOTE(review): on the CPT path, falling through after the loop still
	 * returns true with *pipe unassigned — mirrors upstream behavior.
	 */
	return true;
}
2327 Serge 2268
 
4104 Serge 2269
/*
 * Read the current hardware state of this DP encoder back into
 * pipe_config: sync polarity flags, audio enable, color range, lane
 * count, M/N values, port clock and the derived dot clock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT, sync polarity lives in the transcoder register instead. */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	/* Lane count is stored in the port register as (lanes - 1). */
	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* CPU eDP (port A) derives its link rate from the DP_A PLL setting. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2356
 
3031 serge 2357
/*
 * Encoder disable hook. Order matters: audio off, PSR off, then the
 * backlight/sink/panel power-down sequence under forced VDD, and finally
 * the port itself on pre-ILK hardware.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2330 Serge 2380
 
5354 serge 2381
static void ilk_post_disable_dp(struct intel_encoder *encoder)
3031 serge 2382
{
2383
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4104 Serge 2384
	enum port port = dp_to_dig_port(intel_dp)->port;
5060 serge 2385
 
2386
	intel_dp_link_down(intel_dp);
5354 serge 2387
	if (port == PORT_A)
6084 serge 2388
		ironlake_edp_pll_off(intel_dp);
5060 serge 2389
}
2390
 
2391
static void vlv_post_disable_dp(struct intel_encoder *encoder)
2392
{
2393
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2394
 
2395
	intel_dp_link_down(intel_dp);
2396
}
2397
 
6084 serge 2398
/*
 * Assert (reset=true) or deassert (reset=false) the CHV DPIO data-lane
 * and clock soft resets for this encoder's PHY channel. Lanes 2/3
 * (PCS23) are only touched when more than two lanes are configured.
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	/* Lane resets for the first lane pair (PCS01). */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	if (crtc->config->lane_count > 2) {
		/* Same lane resets for the second lane pair (PCS23). */
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	/* Clock soft reset for PCS01; soft-reset requests must be enabled. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	if (crtc->config->lane_count > 2) {
		/* Clock soft reset for PCS23. */
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
5060 serge 2441
 
6084 serge 2442
/*
 * CHV post-disable hook: take the link down, then assert the PHY
 * data-lane resets under the sideband lock.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_dp_link_down(intel_dp);

	/* DPIO sideband access must be serialized via sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	mutex_unlock(&dev_priv->sb_lock);
}
2457
 
5354 serge 2458
/*
 * Program the requested link-training pattern into either the DDI
 * DP_TP_CTL register (written directly here) or into *DP for the
 * caller to write to the port register. Three register layouts exist:
 * DDI, CPT/GEN7-port-A, and the legacy/CHV layout.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		/* DDI: the pattern is written here, not via *DP. */
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Fall back to pattern 2; CPT has no PAT3 encoding. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				/* Only CHV has a PAT3 encoding on this path. */
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2541
 
2542
/*
 * Enable the DP port: first program it (training pattern 1) without the
 * enable bit, then set DP_PORT_EN in a second write.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2565
 
3031 serge 2566
/*
 * Encoder enable hook. Ordering is significant: PPS setup and panel
 * power-on under pps_lock, PHY ready-wait on VLV/CHV, then sink wake,
 * link training and finally audio.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* The port must not already be enabled. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* VLV needs the PPS bound to this pipe before the port comes up. */
	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* Power the panel up with VDD forced, then release the VDD force. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
2610
 
2611
static void g4x_enable_dp(struct intel_encoder *encoder)
2612
{
2613
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2614
 
2615
	intel_enable_dp(encoder);
5060 serge 2616
	intel_edp_backlight_on(intel_dp);
2330 Serge 2617
}
2618
 
4104 Serge 2619
static void vlv_enable_dp(struct intel_encoder *encoder)
2620
{
4560 Serge 2621
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2622
 
5060 serge 2623
	intel_edp_backlight_on(intel_dp);
6084 serge 2624
	intel_psr_enable(intel_dp);
4104 Serge 2625
}
2626
 
4560 Serge 2627
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
3031 serge 2628
{
2629
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4104 Serge 2630
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3031 serge 2631
 
5060 serge 2632
	intel_dp_prepare(encoder);
2633
 
2634
	/* Only ilk+ has port A */
2635
	if (dport->port == PORT_A) {
2636
		ironlake_set_pll_cpu_edp(intel_dp);
3031 serge 2637
		ironlake_edp_pll_on(intel_dp);
5060 serge 2638
	}
3031 serge 2639
}
2640
 
5354 serge 2641
/*
 * Disconnect this port from its current power sequencer: drop VDD,
 * clear the PPS port-select register and mark pps_pipe invalid.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	/* VDD must be off before the sequencer can be released. */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power seqeuencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power seqeuencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2666
 
2667
/*
 * Detach the given pipe's power sequencer from whichever eDP port
 * currently owns it, so the caller can claim it. Caller must hold
 * pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only pipes A and B have power sequencers on this platform. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* Stealing from a port that is actively displaying is a bug. */
		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2703
 
2704
/*
 * vlv_init_panel_power_sequencer - bind this eDP port to its crtc's pipe PPS
 *
 * Claims the panel power sequencer (PPS) of the pipe driving this port:
 * detaches any PPS the port previously used, steals the target pipe's PPS
 * from any other eDP port still pointing at it, records the new pps_pipe,
 * and (re)programs the PPS registers. The step ordering is significant.
 * Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Power sequencers only matter for eDP panels. */
	if (!is_edp(intel_dp))
		return;

	/* Already using this pipe's power sequencer; nothing to do. */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2744
 
4104 Serge 2745
/*
 * Pre-enable hook for DP on VLV: program fixed DPIO PHY values for this
 * pipe/channel via the sideband interface, then enable the DP port.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	/* DPIO registers are only reachable through the sideband mailbox. */
	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the read result is immediately overwritten by
	 * "val = 0" below; presumably the access itself is wanted (or it
	 * is vestigial) -- confirm against the DPIO docs.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	/* Bit 21 is set only for pipes other than A; purpose undocumented here. */
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	/* Magic PCS DW8/DW14/DW23 values; meaning not documented in this file. */
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
4104 Serge 2773
 
4560 Serge 2774
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
4104 Serge 2775
{
2776
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2777
	struct drm_device *dev = encoder->base.dev;
2778
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 2779
	struct intel_crtc *intel_crtc =
2780
		to_intel_crtc(encoder->base.crtc);
2781
	enum dpio_channel port = vlv_dport_to_channel(dport);
2782
	int pipe = intel_crtc->pipe;
4104 Serge 2783
 
5060 serge 2784
	intel_dp_prepare(encoder);
2785
 
4104 Serge 2786
	/* Program Tx lane resets to default */
6084 serge 2787
	mutex_lock(&dev_priv->sb_lock);
4560 Serge 2788
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
4104 Serge 2789
			 DPIO_PCS_TX_LANE2_RESET |
2790
			 DPIO_PCS_TX_LANE1_RESET);
4560 Serge 2791
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
4104 Serge 2792
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2793
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2794
			 (1<
2795
				 DPIO_PCS_CLK_SOFT_RESET);
2796
 
2797
	/* Fix up inter-pair skew failure */
4560 Serge 2798
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2799
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2800
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
6084 serge 2801
	mutex_unlock(&dev_priv->sb_lock);
4104 Serge 2802
}
2803
 
5060 serge 2804
/*
 * Pre-enable hook for DP on CHV: let hardware manage the TX FIFO reset
 * source, program per-lane UPAR bits and data lane stagger in the DPIO
 * PHY, deassert the data lane resets, enable the port, and finally drop
 * the temporary second-common-lane power override if one was taken in
 * chv_dp_pre_pll_enable().
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	/* The second PCS group only exists when more than 2 lanes are used. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		/* Set the upar bit (clear for lane 1, and for single-lane) */
		if (intel_crtc->config->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming: stagger scales with port clock */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (intel_crtc->config->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}
2892
 
2893
/*
 * Pre-PLL-enable hook for DP on CHV: power up the PHY lanes (including a
 * trick to wake the second common lane when needed), assert the data lane
 * resets, and program the left/right clock distribution and clock channel
 * usage for this port before the DPLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
	u32 val;

	intel_dp_prepare(encoder);

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 * (The override is released again in chv_pre_enable_dp().)
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	/* The second PCS group only exists when more than 2 lanes are used. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2975
 
6084 serge 2976
/*
 * Post-PLL-disable hook for DP on CHV: stop forcing the left/right clock
 * buffers for this pipe, then drop the display PHY lane power overrides.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
3008
 
2330 Serge 3009
/*
3010
 * Native read with retry for link status and receiver capability reads for
3011
 * cases where the sink may still be asleep.
5060 serge 3012
 *
3013
 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3014
 * supposed to retry 3 times per the spec.
2330 Serge 3015
 */
5060 serge 3016
static ssize_t
3017
intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3018
			void *buffer, size_t size)
2330 Serge 3019
{
5060 serge 3020
	ssize_t ret;
3021
	int i;
2330 Serge 3022
 
5354 serge 3023
	/*
3024
	 * Sometime we just get the same incorrect byte repeated
3025
	 * over the entire buffer. Doing just one throw away read
3026
	 * initially seems to "solve" it.
3027
	 */
3028
	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3029
 
2330 Serge 3030
	for (i = 0; i < 3; i++) {
5060 serge 3031
		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3032
		if (ret == size)
3033
			return ret;
2330 Serge 3034
		msleep(1);
3035
	}
3036
 
5060 serge 3037
	return ret;
2330 Serge 3038
}
3039
 
3040
/*
3041
 * Fetch AUX CH registers 0x202 - 0x207 which contain
3042
 * link status information
3043
 */
3044
static bool
2342 Serge 3045
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2330 Serge 3046
{
5060 serge 3047
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
6084 serge 3048
				       DP_LANE0_1_STATUS,
3049
				       link_status,
5060 serge 3050
				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2330 Serge 3051
}
3052
 
5060 serge 3053
/* These are source-specific values. */
/*
 * Return the maximum voltage swing level (DP_TRAIN_VOLTAGE_SWING_LEVEL_*)
 * this source can drive. The answer depends on the platform and, in some
 * cases, on whether the port is the eDP port A.
 */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	/* Ordering matters: the most specific platform checks come first. */
	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* Low-vswing eDP panels on port A are allowed the top level. */
		if (dev_priv->edp_low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3076
 
3077
/*
 * Return the maximum pre-emphasis level (DP_TRAIN_PRE_EMPH_LEVEL_*)
 * the source supports for the given voltage swing, per platform.
 * Higher swing levels generally leave less pre-emphasis headroom.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		/* Gen9+: full table, one step down per swing level. */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		/* Older platforms: levels 0 and 1 share the same cap. */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3144
 
6084 serge 3145
/*
 * Program the VLV DPIO PHY for the voltage swing / pre-emphasis pair
 * requested in train_set[0]. The nested switches map each (pre-emphasis,
 * vswing) combination to empirically-derived demph / uniqtranscale
 * register values; unsupported combinations return 0 without touching
 * the PHY. Returns 0 in all cases.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Push the chosen values into the PHY over sideband. */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3244
 
6084 serge 3245
/* Unique transition scale is needed only at max vswing with zero pre-emphasis. */
static bool chv_need_uniq_trans_scale(uint8_t train_set)
{
	uint8_t preemph = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
	uint8_t vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;

	return preemph == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
	       vswing == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
3250
 
3251
/*
 * Program the CHV DPIO PHY for the voltage swing / pre-emphasis pair
 * requested in train_set[0]: pick deemph/margin values from the lookup
 * tables, clear the swing calc bits, program per-lane deemph, margin and
 * unique transition scale, then restart the swing calculation.
 * Unsupported combinations return 0 without touching the PHY.
 * Returns 0 in all cases.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* The second PCS group only exists when more than 2 lanes are used. */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	/* Zero the TX margin fields before programming the new margin. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3418
 
2330 Serge 3419
static void
4560 Serge 3420
intel_get_adjust_train(struct intel_dp *intel_dp,
3421
		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
2330 Serge 3422
{
3423
	uint8_t v = 0;
3424
	uint8_t p = 0;
3425
	int lane;
2342 Serge 3426
	uint8_t voltage_max;
3427
	uint8_t preemph_max;
2330 Serge 3428
 
3429
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
3243 Serge 3430
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3431
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
2330 Serge 3432
 
3433
		if (this_v > v)
3434
			v = this_v;
3435
		if (this_p > p)
3436
			p = this_p;
3437
	}
3438
 
2342 Serge 3439
	voltage_max = intel_dp_voltage_max(intel_dp);
3440
	if (v >= voltage_max)
3441
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2330 Serge 3442
 
2342 Serge 3443
	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3444
	if (p >= preemph_max)
3445
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2330 Serge 3446
 
3447
	for (lane = 0; lane < 4; lane++)
3448
		intel_dp->train_set[lane] = v | p;
3449
}
3450
 
3451
static uint32_t
6084 serge 3452
gen4_signal_levels(uint8_t train_set)
2330 Serge 3453
{
3454
	uint32_t	signal_levels = 0;
3455
 
3456
	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3457
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2330 Serge 3458
	default:
3459
		signal_levels |= DP_VOLTAGE_0_4;
3460
		break;
5354 serge 3461
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2330 Serge 3462
		signal_levels |= DP_VOLTAGE_0_6;
3463
		break;
5354 serge 3464
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2330 Serge 3465
		signal_levels |= DP_VOLTAGE_0_8;
3466
		break;
5354 serge 3467
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2330 Serge 3468
		signal_levels |= DP_VOLTAGE_1_2;
3469
		break;
3470
	}
3471
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
5354 serge 3472
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
2330 Serge 3473
	default:
3474
		signal_levels |= DP_PRE_EMPHASIS_0;
3475
		break;
5354 serge 3476
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
2330 Serge 3477
		signal_levels |= DP_PRE_EMPHASIS_3_5;
3478
		break;
5354 serge 3479
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
2330 Serge 3480
		signal_levels |= DP_PRE_EMPHASIS_6;
3481
		break;
5354 serge 3482
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
2330 Serge 3483
		signal_levels |= DP_PRE_EMPHASIS_9_5;
3484
		break;
3485
	}
3486
	return signal_levels;
3487
}
3488
 
3489
/* Gen6's DP voltage swing and pre-emphasis control */
/*
 * Map a train_set byte to the gen6 eDP link-training register value.
 * Several swing/pre-emphasis pairs share one hardware setting, hence the
 * combined cases; unsupported combinations fall back to the 400-600mV/0dB
 * value with a debug message.
 */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3516
 
2342 Serge 3517
/* Gen7's DP voltage swing and pre-emphasis control */
/*
 * Map a train_set byte to the gen7 (IVB) eDP link-training register value.
 * Unsupported combinations fall back to the 500mV/0dB value with a debug
 * message.
 */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3547
 
3480 Serge 3548
/* Properly updates "DP" with the correct signal levels. */
/*
 * Select the per-platform vswing/pre-emphasis encoding for the current
 * train_set[0] request and merge it into *DP under the platform's mask.
 * Branch order matters: DDI platforms first, then CHV before VLV (CHV is
 * a superset check), then gen7/gen6 eDP on port A, with gen4 as fallback.
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* On Broxton the levels live elsewhere; nothing goes into *DP */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	/* Replace only the platform's level bits, keep the rest of *DP */
	*DP = (*DP & ~mask) | signal_levels;
}
3591
 
2330 Serge 3592
/*
 * Program the source-side training pattern into the port register, then
 * mirror the pattern (and per-lane drive settings, unless disabling) into
 * the sink's DPCD.  Returns true iff the full DPCD write completed.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);
	/* One byte for the pattern plus one per lane for the drive settings */
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	/* Source side first: write the port register before touching the sink */
	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
3624
 
3625
/*
 * (Re)start link training from scratch: clear the cached train set
 * (unless a previously validated set is being reused), reprogram the
 * signal levels and apply the requested training pattern.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	if (!intel_dp->train_set_valid)
		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3634
 
3635
/*
 * Apply the sink's requested vswing/pre-emphasis adjustments (from
 * link_status) to the source registers and write the new per-lane drive
 * settings back to the sink.  Returns true iff all lane bytes were written.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);
	int ret;

	/* Fold the sink's adjust requests into intel_dp->train_set */
	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
3655
 
3746 Serge 3656
/*
 * On DDI platforms, switch the transport to idle-pattern transmission and
 * (except on port A) wait for the idle pattern to be acknowledged.
 * No-op on non-DDI hardware.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3686
 
2330 Serge 3687
/* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training (DP spec 3.5.1.3): program the
 * link rate/lane count into the sink, enable the port, and iterate training
 * pattern 1 until the sink reports clock-recovery lock, retrying with the
 * sink's requested drive settings.  Gives up after 5 retries at max swing
 * or 5 retries at the same voltage.  Leaves the final port value in
 * intel_dp->DP for the channel-equalization phase.
 */
static void
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];
	uint8_t link_bw, rate_select;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
			      &link_bw, &rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* eDP 1.4 sinks select the rate via a table index instead of link_bw */
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	/* 0xff can never match a real swing value, forcing a fresh start */
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/*
		 * if we used previously trained voltage and pre-emphasis values
		 * and we don't get clock recovery, reset link training values
		 */
		if (intel_dp->train_set_valid) {
			DRM_DEBUG_KMS("clock recovery not ok, reset");
			/* clear the flag as we are not reusing train set */
			intel_dp->train_set_valid = false;
			if (!intel_dp_reset_link_train(intel_dp, &DP,
						       DP_TRAINING_PATTERN_1 |
						       DP_LINK_SCRAMBLING_DISABLE)) {
				DRM_ERROR("failed to enable link training\n");
				return;
			}
			continue;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}
3801
 
6084 serge 3802
/*
 * Channel-equalization phase of DP link training: run training pattern 2
 * (or pattern 3 when source and sink both support TPS3/HBR2) until the
 * sink reports channel-EQ lock.  Falls back to re-running clock recovery
 * when lock is lost, and marks the train set reusable on success.
 */
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/*
	 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
	 *
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
	 * also mandatory for downstream devices that support HBR2.
	 *
	 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
	 * supported but still not enabled.
	 */
	if (intel_dp_source_supports_hbr2(dev) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		training_pattern = DP_TRAINING_PATTERN_3;
	else if (intel_dp->link_rate == 540000)
		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		/* Abort after 5 full clock-recovery restarts */
		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      intel_dp->lane_count)) {
			intel_dp->train_set_valid = false;
			intel_dp_link_training_clock_recovery(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_link_training_clock_recovery(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		/* Cache the working settings for faster future retrains */
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
3899
 
3746 Serge 3900
/* End link training: switch source and sink back to normal transmission. */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3905
 
6084 serge 3906
/* Run full DP link training: clock recovery followed by channel EQ. */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	intel_dp_link_training_clock_recovery(intel_dp);
	intel_dp_link_training_channel_equalization(intel_dp);
}
3912
 
2330 Serge 3913
/*
 * Take the (non-DDI) DP port down: send the idle training pattern,
 * disable the port and audio output, and apply the IBX transcoder-A
 * workaround where needed.  Write order is a fixed HW sequence.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI ports are torn down elsewhere; reaching here would be a bug */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	msleep(intel_dp->panel_power_down_delay);
}
3968
 
3969
/*
 * Read and cache the sink's DPCD receiver capability block, then probe
 * optional features: PSR/PSR2 support (eDP), eDP 1.4 intermediate link
 * rates, and downstream-port info.  Returns false if the aux transfer
 * fails, no DPCD is present, or downstream-port info can't be fetched.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 is gen9+ only and additionally requires aux frame sync */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(dev)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The rate table is zero-terminated */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
2330 Serge 4058
 
3031 serge 4059
/*
 * Log the sink and branch device OUIs (IEEE vendor identifiers) for
 * debugging, when the sink advertises OUI support.  Read-only probe.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);
}
4075
 
2342 Serge 4076
static bool
5060 serge 4077
intel_dp_probe_mst(struct intel_dp *intel_dp)
4078
{
4079
	u8 buf[1];
4080
 
4081
	if (!intel_dp->can_mst)
4082
		return false;
4083
 
4084
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4085
		return false;
4086
 
4087
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4088
		if (buf[0] & DP_MST_CAP) {
4089
			DRM_DEBUG_KMS("Sink is MST capable\n");
4090
			intel_dp->is_mst = true;
4091
		} else {
4092
			DRM_DEBUG_KMS("Sink is not MST capable\n");
4093
			intel_dp->is_mst = false;
4094
		}
4095
	}
4096
 
4097
	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4098
	return intel_dp->is_mst;
4099
}
4100
 
6084 serge 4101
/*
 * Stop sink CRC calculation: clear DP_TEST_SINK_START in the sink's
 * DP_TEST_SINK register and mark the local state stopped.  IPS is
 * re-enabled on every exit path (it was disabled by _start).
 * Returns 0 on success or -EIO on aux failure.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	intel_dp->sink_crc.started = false;
 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
4126
 
4127
/*
 * Start sink CRC calculation: verify the sink supports test CRCs, record
 * the current test counter, disable IPS (it perturbs the CRC) and set
 * DP_TEST_SINK_START.  Returns 0 on success, -ENOTTY if unsupported, or
 * -EIO on aux failure.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	/* Restart cleanly if a previous CRC session is still active */
	if (intel_dp->sink_crc.started) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Undo the IPS disable if we couldn't actually start */
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_dp->sink_crc.started = true;
	return 0;
}
5354 serge 4162
 
6084 serge 4163
/*
 * Read a fresh 6-byte frame CRC from the sink into *crc.  Retries for up
 * to 6 vblanks until the sink's test counter advances and the CRC differs
 * from the previously read one.  Returns 0 on success, -EIO on aux
 * failure, or -ETIMEDOUT if the sink never produced a CRC.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;
	bool old_equal_new;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

		/*
		 * Count might be reset during the loop. In this case
		 * last known count needs to be reset as well.
		 */
		if (count == 0)
			intel_dp->sink_crc.last_count = 0;

		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
			ret = -EIO;
			goto stop;
		}

		/* A stale CRC has the same counter value and identical bytes */
		old_equal_new = (count == intel_dp->sink_crc.last_count &&
				 !memcmp(intel_dp->sink_crc.last_crc, crc,
					 6 * sizeof(u8)));

	} while (--attempts && (count == 0 || old_equal_new));

	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));

	if (attempts == 0) {
		if (old_equal_new) {
			/* Repeated CRC is reported but not treated as an error */
			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
		} else {
			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
			ret = -ETIMEDOUT;
			goto stop;
		}
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4222
 
4223
/* Read the 1-byte device service IRQ vector; true iff the read succeeded. */
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
				       DP_DEVICE_SERVICE_IRQ_VECTOR,
				       sink_irq_vector, 1) == 1;
}
4230
 
4231
static bool
4232
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4233
{
2342 Serge 4234
	int ret;
4235
 
5060 serge 4236
	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4237
					     DP_SINK_COUNT_ESI,
4238
					     sink_irq_vector, 14);
4239
	if (ret != 14)
2342 Serge 4240
		return false;
4241
 
4242
	return true;
4243
}
4244
 
6084 serge 4245
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
2342 Serge 4246
{
6084 serge 4247
	uint8_t test_result = DP_TEST_ACK;
4248
	return test_result;
2342 Serge 4249
}
4250
 
6084 serge 4251
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4252
{
4253
	uint8_t test_result = DP_TEST_NAK;
4254
	return test_result;
4255
}
4256
 
4257
/*
 * DP compliance EDID-read autotest (DP CTS 1.2, test 4.2.2.x): if the
 * EDID read failed, was corrupt, or saw too many I2C defers, request the
 * failsafe resolution; otherwise write back the last EDID block's
 * checksum and ACK.  Sets the compliance-active flag before returning.
 */
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_NAK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (!drm_dp_dpcd_write(&intel_dp->aux,
					DP_TEST_EDID_CHECKSUM,
					&block->checksum,
					1))
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance_test_active = 1;

	return test_result;
}
4302
 
4303
static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4304
{
4305
	uint8_t test_result = DP_TEST_NAK;
4306
	return test_result;
4307
}
4308
 
4309
/*
 * Service a DP compliance test request: read DP_TEST_REQUEST from the
 * sink, dispatch to the matching autotest handler, and write the
 * ACK/NAK response back to DP_TEST_RESPONSE.  Resets all compliance
 * bookkeeping before dispatching.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* Clear I2C stats so the EDID autotest sees only fresh counts */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4361
 
5060 serge 4362
/*
 * Handle a short HPD pulse on an MST-enabled port: read the sink's ESI
 * (Event Status Indicator) block, retrain if channel EQ failed, forward
 * the event to the MST manager, and ack the handled bits back to the
 * sink.  Loops (via go_again) while the sink keeps reporting events.
 *
 * Returns the MST manager's result, 0 if nothing was handled, or
 * -EINVAL if the port is not in MST mode or the ESI read failed
 * (in which case MST mode is torn down and a hotplug event is sent).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the handled event bits; retry the
				 * 3-byte DPCD write up to 3 times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* Poll again in case more events arrived. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4418
 
2330 Serge 4419
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */
/*
 * Re-check the link after a short HPD pulse on a non-MST port.  Caller
 * must hold connection_mutex (asserted below).  Clears any pending sink
 * service IRQ and retrains the link if channel EQ is no longer OK.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/* Nothing to do unless the encoder is driving an active CRTC. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4474
 
3031 serge 4475
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide connected/disconnected purely from the sink's DPCD: direct
 * sinks are connected; branch devices are probed via SINK_COUNT (if
 * HPD-capable), then a gentle DDC probe, then per-port-type heuristics.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD < 1.1: fall back to the coarse downstream-port type. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4524
 
4525
static enum drm_connector_status
5354 serge 4526
edp_detect(struct intel_dp *intel_dp)
2330 Serge 4527
{
3243 Serge 4528
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2330 Serge 4529
	enum drm_connector_status status;
4530
 
6084 serge 4531
	status = intel_panel_detect(dev);
4532
	if (status == connector_status_unknown)
4533
		status = connector_status_connected;
5354 serge 4534
 
6084 serge 4535
	return status;
5354 serge 4536
}
2330 Serge 4537
 
6084 serge 4538
/*
 * Report live hotplug state for a digital port on IBX PCH by sampling
 * the corresponding bit in the SDEISR register.  Port A (eDP) has no
 * hotplug bit and is always reported connected.
 */
static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_A:
		return true;
	case PORT_B:
		bit = SDE_PORTB_HOTPLUG;
		break;
	case PORT_C:
		bit = SDE_PORTC_HOTPLUG;
		break;
	case PORT_D:
		bit = SDE_PORTD_HOTPLUG;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
4562
 
6084 serge 4563
/*
 * Report live hotplug state for a digital port on CPT/LPT/SPT PCH via
 * the CPT-layout SDEISR bits.  Port A has no hotplug bit and is always
 * reported connected; port E uses the SPT-specific bit.
 */
static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_A:
		return true;
	case PORT_B:
		bit = SDE_PORTB_HOTPLUG_CPT;
		break;
	case PORT_C:
		bit = SDE_PORTC_HOTPLUG_CPT;
		break;
	case PORT_D:
		bit = SDE_PORTD_HOTPLUG_CPT;
		break;
	case PORT_E:
		bit = SDE_PORTE_HOTPLUG_SPT;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
4590
 
4591
/*
 * Report live hotplug state for a digital port on G4x-class hardware
 * by sampling the port's live-status bit in PORT_HOTPLUG_STAT.
 */
static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
4613
 
6084 serge 4614
/*
 * Report live hotplug state for a digital port on Valleyview by
 * sampling the VLV-layout live-status bit in PORT_HOTPLUG_STAT.
 */
static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
		break;
	default:
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
4636
 
4637
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4638
				       struct intel_digital_port *intel_dig_port)
4639
{
4640
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4641
	enum port port;
4642
    u32 bit = 0;
4643
 
4644
 
4645
	return I915_READ(GEN8_DE_PORT_ISR) & bit;
4646
}
4647
 
4648
/*
 * intel_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Return %true if @port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
					 struct intel_digital_port *port)
{
	/* IBX is tested before the generic PCH_SPLIT case; presumably
	 * HAS_PCH_SPLIT() is also true on IBX — TODO confirm. */
	if (HAS_PCH_IBX(dev_priv))
		return ibx_digital_port_connected(dev_priv, port);
	if (HAS_PCH_SPLIT(dev_priv))
		return cpt_digital_port_connected(dev_priv, port);
	else if (IS_BROXTON(dev_priv))
		return bxt_digital_port_connected(dev_priv, port);
	else if (IS_VALLEYVIEW(dev_priv))
		return vlv_digital_port_connected(dev_priv, port);
	else
		return g4x_digital_port_connected(dev_priv, port);
}
4669
 
5097 serge 4670
/*
 * Detect a DP sink on PCH-split platforms: check the live hotplug
 * status first, then fall through to DPCD-based detection.
 */
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	if (!intel_digital_port_connected(dev_priv, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}
4682
 
4683
/*
 * Detect a DP sink on pre-PCH (G4x-class) platforms.  eDP is handled
 * via the panel/lid state; external ports check live hotplug status
 * and then DPCD.
 */
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		enum drm_connector_status status;

		/* Map "unknown" panel state to "connected", as in edp_detect(). */
		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}
4704
 
2342 Serge 4705
static struct edid *
5354 serge 4706
intel_dp_get_edid(struct intel_dp *intel_dp)
2342 Serge 4707
{
5354 serge 4708
	struct intel_connector *intel_connector = intel_dp->attached_connector;
3243 Serge 4709
 
4710
	/* use cached edid if we have one */
4711
	if (intel_connector->edid) {
4712
		/* invalid edid */
4713
		if (IS_ERR(intel_connector->edid))
3031 serge 4714
			return NULL;
4715
 
4560 Serge 4716
		return drm_edid_duplicate(intel_connector->edid);
5354 serge 4717
	} else
4718
		return drm_get_edid(&intel_connector->base,
4719
				    &intel_dp->aux.ddc);
4720
}
3031 serge 4721
 
5354 serge 4722
/*
 * Read the sink's EDID, cache it on the connector, and derive has_audio
 * from it (unless overridden by the force_audio property).
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct edid *edid;

	edid = intel_dp_get_edid(intel_dp);
	intel_connector->detect_edid = edid;

	/* User-forced audio setting wins over EDID-reported capability. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
	else
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
}
4736
 
5354 serge 4737
/*
 * Drop the cached detect-time EDID and reset the derived audio state.
 * Counterpart of intel_dp_set_edid().
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	kfree(intel_connector->detect_edid);
	intel_connector->detect_edid = NULL;

	intel_dp->has_audio = false;
}
3031 serge 4747
 
2330 Serge 4748
/*
 * Connector ->detect() hook.  Performs full output detection under an
 * AUX power-domain reference: platform-specific presence check, OUI
 * probe, MST probe (an MST port reports disconnected at this level),
 * EDID read, and servicing of any pending sink IRQ (including
 * automated-test requests).
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* Hold the AUX power domain for the DPCD/EDID traffic below. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4820
 
5354 serge 4821
/*
 * Connector ->force() hook: refresh the cached EDID for a connector the
 * user forced on, taking the AUX power domain around the EDID read.
 * Does nothing unless the connector was previously seen connected.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4846
 
4847
/*
 * Connector ->get_modes() hook: populate modes from the cached EDID;
 * for eDP with no usable EDID, fall back to the panel's fixed mode.
 * Returns the number of modes added.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);
		if (ret)
			return ret;
	}

	/* if eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_attached_dp(connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}
4874
 
3243 Serge 4875
static bool
4876
intel_dp_detect_audio(struct drm_connector *connector)
4877
{
5354 serge 4878
	bool has_audio = false;
3243 Serge 4879
	struct edid *edid;
2330 Serge 4880
 
5354 serge 4881
	edid = to_intel_connector(connector)->detect_edid;
4882
	if (edid)
3243 Serge 4883
		has_audio = drm_detect_monitor_audio(edid);
2330 Serge 4884
 
3243 Serge 4885
	return has_audio;
4886
}
2330 Serge 4887
 
4888
/*
 * Connector ->set_property() hook.  Handles force_audio, broadcast-RGB
 * range selection, and (eDP only) the panel scaling mode.  Any change
 * that affects output triggers a modeset restore via the `done` path.
 * Returns 0 on success or a negative errno.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No-op if the setting didn't change. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* Skip the modeset if the effective range didn't change. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new setting by restoring the mode on the live CRTC. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4975
 
4976
/*
 * Connector ->destroy() hook: free the cached EDIDs, tear down the eDP
 * panel state if applicable, and release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* The fixed EDID may be a PTR_ERR marker; only free real EDIDs. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4994
 
3243 Serge 4995
/*
 * Encoder ->destroy() hook: unregister the AUX channel, tear down any
 * MST state, force panel VDD off for eDP, and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
5016
 
5060 serge 5017
/*
 * Encoder suspend hook: for eDP, cancel the pending delayed VDD-off
 * work and synchronously turn VDD off so it isn't left on across
 * suspend.  No-op for non-eDP ports.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
5033
 
5354 serge 5034
/*
 * Reconcile driver state with a panel VDD that the BIOS left enabled:
 * take the power-domain reference our state tracking expects and
 * schedule the normal delayed VDD-off.  Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
5058
 
5060 serge 5059
/*
 * Encoder ->reset() hook (boot/resume): for eDP, re-read the power
 * sequencer assignment on VLV and sanitize any BIOS-enabled panel VDD.
 * No-op for non-eDP encoders.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
5081
 
2330 Serge 5082
/* DRM connector vtable for DP/eDP connectors (atomic helpers plus the
 * DP-specific detect/force/set_property/destroy hooks above). */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
5093
 
5094
/* Connector helper vtable: mode enumeration/validation and encoder
 * selection for DP/eDP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
5099
 
5100
/* DRM encoder vtable for DP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
5104
 
6084 serge 5105
/*
 * Hotplug pulse handler for a DP digital port.  Long pulses mean a
 * connect/disconnect (re-read DPCD, probe MST, or retrain the SST
 * link); short pulses mean a sink IRQ (service MST events or re-check
 * link status).  Runs under an AUX power-domain reference, which is
 * balanced on all exit paths via the put_power label.
 *
 * Returns IRQ_HANDLED when the pulse was processed, IRQ_NONE otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		/* Not an MST sink: handle it as SST and drop MST state. */
		if (!intel_dp_probe_mst(intel_dp)) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5185
 
2327 Serge 5186
/* Return which DP Port should be selected for Transcoder DP control */
5187
int
2342 Serge 5188
intel_trans_dp_port_sel(struct drm_crtc *crtc)
2327 Serge 5189
{
5190
	struct drm_device *dev = crtc->dev;
3243 Serge 5191
	struct intel_encoder *intel_encoder;
5192
	struct intel_dp *intel_dp;
2327 Serge 5193
 
3243 Serge 5194
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5195
		intel_dp = enc_to_intel_dp(&intel_encoder->base);
2327 Serge 5196
 
3243 Serge 5197
		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5198
		    intel_encoder->type == INTEL_OUTPUT_EDP)
2327 Serge 5199
			return intel_dp->output_reg;
5200
	}
5201
 
5202
	return -1;
5203
}
2330 Serge 5204
 
6084 serge 5205
/* check the VBT to see whether the eDP is on another port */
/*
 * Returns true if @port carries eDP: always for PORT_A (gen5+), and
 * for other ports when the VBT lists a child device of eDP type on
 * the matching DVO port.
 */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	union child_device_config *p_child;
	int i;
	/* Maps DRM port enum to the VBT DVO port encoding. */
	static const short port_mapping[] = {
		[PORT_B] = DVO_PORT_DPB,
		[PORT_C] = DVO_PORT_DPC,
		[PORT_D] = DVO_PORT_DPD,
		[PORT_E] = DVO_PORT_DPE,
	};

	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		return false;

	if (port == PORT_A)
		return true;

	if (!dev_priv->vbt.child_dev_num)
		return false;

	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		p_child = dev_priv->vbt.child_dev + i;

		if (p_child->common.dvo_port == port_mapping[port] &&
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
			return true;
	}
	return false;
}
5241
 
5060 serge 5242
/*
 * Attach the standard DP connector properties (force-audio,
 * broadcast-RGB) and, for eDP, the panel scaling-mode property with an
 * aspect-scaling default.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5260
 
/*
 * Reset the panel power sequencing timestamps to "now" so the delay
 * bookkeeping (wait_panel_* helpers elsewhere in this file) starts from a
 * known point when the encoder is initialized.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5267
 
/*
 * Compute the eDP panel power sequencing delays (intel_dp->pps_delays and
 * the derived intel_dp->*_delay fields) by taking the maximum of the
 * current hardware register values and the VBT, falling back to the eDP
 * spec limits when both are zero.  Must be called with pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Select the per-platform PPS register set to read from. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* BXT keeps the power cycle delay in the control register,
	 * zero-based in units of 100 ms. */
	if (IS_BROXTON(dev)) {
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from the hw's 100 usec units to ms for our bookkeeping. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5390
 
/*
 * Program the panel power sequencer hardware registers from the delays
 * previously computed into intel_dp->pps_delays.  Must be called with
 * pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Select the per-platform PPS register set to write to. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT: power cycle delay lives in the control register. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5478
 
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* Requesting the panel's downclock rate selects the low-RR mode;
	 * anything else is treated as high RR. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* gen8+ (except CHV) switches RR via the M/N divider pair;
	 * gen7/VLV toggle a PIPECONF mode-switch bit instead. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5582
 
5583
/**
5584
 * intel_edp_drrs_enable - init drrs struct if supported
5585
 * @intel_dp: DP struct
5586
 *
5587
 * Initializes frontbuffer_bits and drrs.dp
5588
 */
5589
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5590
{
5591
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5592
	struct drm_i915_private *dev_priv = dev->dev_private;
5593
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5594
	struct drm_crtc *crtc = dig_port->base.base.crtc;
5595
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5596
 
5597
	if (!intel_crtc->config->has_drrs) {
5598
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5599
		return;
5600
	}
5601
 
5602
	mutex_lock(&dev_priv->drrs.mutex);
5603
	if (WARN_ON(dev_priv->drrs.dp)) {
5604
		DRM_ERROR("DRRS already enabled\n");
5605
		goto unlock;
5606
	}
5607
 
5608
	dev_priv->drrs.busy_frontbuffer_bits = 0;
5609
 
5610
	dev_priv->drrs.dp = intel_dp;
5611
 
5612
unlock:
5613
	mutex_unlock(&dev_priv->drrs.mutex);
5614
}
5615
 
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Restores the fixed (high) refresh rate if the panel is currently
 * downclocked, clears drrs.dp and cancels any pending downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Back to the fixed refresh rate before tearing DRRS down. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/* Must be called without the mutex: the work item takes it too. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5648
 
/*
 * Idleness work item: after the idle timeout, drop the panel to its
 * downclocked refresh rate if no frontbuffer is still marked busy.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled since this work was queued. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5060 serge 5678
 
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes start.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Stop any pending downclock before marking planes busy. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only the bits on the DRRS pipe are relevant. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5721
 
6084 serge 5722
/**
5723
 * intel_edp_drrs_flush - Restart Idleness DRRS
5724
 * @dev: DRM device
5725
 * @frontbuffer_bits: frontbuffer plane tracking bits
5726
 *
5727
 * This function gets called every time rendering on the given planes has
5728
 * completed or flip on a crtc is completed. So DRRS should be upclocked
5729
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5730
 * if no other planes are dirty.
5731
 *
5732
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5733
 */
5734
void intel_edp_drrs_flush(struct drm_device *dev,
5735
		unsigned frontbuffer_bits)
5736
{
5737
	struct drm_i915_private *dev_priv = dev->dev_private;
5738
	struct drm_crtc *crtc;
5739
	enum pipe pipe;
5740
 
5741
	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5742
		return;
5743
 
5744
//	cancel_delayed_work(&dev_priv->drrs.work);
5745
 
5746
	mutex_lock(&dev_priv->drrs.mutex);
5747
	if (!dev_priv->drrs.dp) {
5748
		mutex_unlock(&dev_priv->drrs.mutex);
5749
		return;
5750
	}
5751
 
5752
	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5753
	pipe = to_intel_crtc(crtc)->pipe;
5754
 
5755
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5756
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5757
 
5758
	/* flush means busy screen hence upclock */
5759
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5760
		intel_dp_set_drrs_state(dev_priv->dev,
5761
				dev_priv->drrs.dp->attached_connector->panel.
5762
				fixed_mode->vrefresh);
5763
 
5764
	mutex_unlock(&dev_priv->drrs.mutex);
5765
}
5766
 
5767
/**
5768
 * DOC: Display Refresh Rate Switching (DRRS)
5769
 *
5770
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5771
 * which enables switching between low and high refresh rates,
5772
 * dynamically, based on the usage scenario. This feature is applicable
5773
 * for internal panels.
5774
 *
5775
 * Indication that the panel supports DRRS is given by the panel EDID, which
5776
 * would list multiple refresh rates for one resolution.
5777
 *
5778
 * DRRS is of 2 types - static and seamless.
5779
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5780
 * (may appear as a blink on screen) and is used in dock-undock scenario.
5781
 * Seamless DRRS involves changing RR without any visual effect to the user
5782
 * and can be used during normal system usage. This is done by programming
5783
 * certain registers.
5784
 *
5785
 * Support for static/seamless DRRS may be indicated in the VBT based on
5786
 * inputs from the panel spec.
5787
 *
5788
 * DRRS saves power by switching to low RR based on usage scenarios.
5789
 *
5790
 * eDP DRRS:-
5791
 *        The implementation is based on frontbuffer tracking implementation.
5792
 * When there is a disturbance on the screen triggered by user activity or a
5793
 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5794
 * When there is no movement on screen, after a timeout of 1 second, a switch
5795
 * to low RR is made.
5796
 *        For integration with frontbuffer tracking code,
5797
 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5798
 *
5799
 * DRRS can be further extended to support other internal panels and also
5800
 * the scenario of video playback wherein RR is set based on the rate
5801
 * requested by userspace.
5802
 */
5803
 
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/* Work/mutex are set up even when DRRS ends up unsupported. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* A second, lower refresh rate in the EDID is what enables DRRS. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5853
 
/*
 * Finish connector setup for eDP: probe the DPCD to rule out a "ghost"
 * panel, program the power sequencer registers, cache the EDID, pick the
 * panel's fixed (and optional downclock) mode, and set up the backlight.
 * Returns false if the panel does not respond, so the caller can tear the
 * connector down again.  No-op (returns true) for non-eDP.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	/* mode_config.mutex protects the probed_modes list below. */
	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* Cached value may be an ERR_PTR; consumers must check. */
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5958
 
/*
 * Create and wire up the DRM connector for a DP/eDP digital port: select
 * per-platform AUX vfuncs, decide DP vs eDP connector type, register the
 * connector, pick the hotplug pin, initialize the panel power sequencer
 * (eDP), AUX channel and MST, and finally run the eDP panel probe.
 * Returns false (after full teardown of the connector) if the eDP panel
 * turns out to be a ghost or the port/platform combination is invalid.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A-step: port B uses port A's hotplug pin. */
		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* Ghost eDP panel: unwind everything set up above. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
3243 Serge 6105
 
/*
 * Top-level entry point for registering a DP encoder at @output_reg on
 * @port: allocates the digital port and connector, wires up the
 * per-platform enable/disable vfuncs and hotplug handling, then hands off
 * to intel_dp_init_connector().  All allocations are unwound via the goto
 * labels if any step fails.
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	/* Common vfuncs, then per-platform enable/disable hooks. */
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* CHV port D is restricted to pipe C; everything else may clone
	 * across the first two (or three) pipes. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}
5060 serge 6182
 
6183
void intel_dp_mst_suspend(struct drm_device *dev)
6184
{
6185
	struct drm_i915_private *dev_priv = dev->dev_private;
6186
	int i;
6187
 
6188
	/* disable MST */
6189
	for (i = 0; i < I915_MAX_PORTS; i++) {
6084 serge 6190
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5060 serge 6191
		if (!intel_dig_port)
6192
			continue;
6193
 
6194
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6195
			if (!intel_dig_port->dp.can_mst)
6196
				continue;
6197
			if (intel_dig_port->dp.is_mst)
6198
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6199
		}
6200
	}
6201
}
6202
 
6203
void intel_dp_mst_resume(struct drm_device *dev)
6204
{
6205
	struct drm_i915_private *dev_priv = dev->dev_private;
6206
	int i;
6207
 
6208
	for (i = 0; i < I915_MAX_PORTS; i++) {
6084 serge 6209
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5060 serge 6210
		if (!intel_dig_port)
6211
			continue;
6212
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6213
			int ret;
6214
 
6215
			if (!intel_dig_port->dp.can_mst)
6216
				continue;
6217
 
6218
			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6219
			if (ret != 0) {
6220
				intel_dp_check_mst_status(&intel_dig_port->dp);
6221
			}
6222
		}
6223
	}
6224
}