Subversion Repositories Kolibri OS

Rev

Rev 5367 | Rev 6103 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2008 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *    Keith Packard 
25
 *
26
 */
27
 
28
#include 
2330 Serge 29
#include 
3031 serge 30
#include 
31
#include 
6084 serge 32
#include 
3031 serge 33
#include 
34
#include 
35
#include 
2327 Serge 36
#include "intel_drv.h"
3031 serge 37
#include 
2327 Serge 38
#include "i915_drv.h"
39
 
5060 serge 40
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
2327 Serge 41
 
6084 serge 42
/* Compliance test status bits  */
43
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
44
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
45
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
46
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47
 
4560 Serge 48
/*
 * Pairing of a DP link clock with the DPLL divider values that produce it.
 * Used by the per-platform tables below (gen4/pch/vlv/chv).
 */
struct dp_link_dpll {
	int clock;		/* link clock in kHz (162000, 270000, ...) */
	struct dpll dpll;	/* divider settings yielding @clock */
};
52
 
53
/* DPLL divider settings for the two gen4 DP link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
59
 
60
/* DPLL divider settings for DP link rates on PCH-split platforms. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
66
 
67
/* DPLL divider settings for DP link rates on Valleyview. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
73
 
5060 serge 74
/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below only provides the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
91
 
6084 serge 92
/* Link rates (kHz) usable by the source, per platform family. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
/* The three standard DP rates used on everything else. */
static const int default_rates[] = { 162000, 270000, 540000 };
97
 
2327 Serge 98
/**
99
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
100
 * @intel_dp: DP struct
101
 *
102
 * If a CPU or PCH DP output is attached to an eDP panel, this function
103
 * will return true, and false otherwise.
104
 */
105
static bool is_edp(struct intel_dp *intel_dp)
106
{
3243 Serge 107
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
108
 
109
	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
2327 Serge 110
}
111
 
3243 Serge 112
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
2327 Serge 113
{
3243 Serge 114
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
115
 
116
	return intel_dig_port->base.base.dev;
2327 Serge 117
}
118
 
2330 Serge 119
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
120
{
3243 Serge 121
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2330 Serge 122
}
2327 Serge 123
 
2330 Serge 124
static void intel_dp_link_down(struct intel_dp *intel_dp);
5354 serge 125
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
5060 serge 126
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
5354 serge 127
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
128
static void vlv_steal_power_sequencer(struct drm_device *dev,
129
				      enum pipe pipe);
2330 Serge 130
 
6084 serge 131
/*
 * Return a mask (bits 0-3, one bit per lane) of the lanes NOT used by a
 * link running with @lane_count lanes.
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used = (1 << lane_count) - 1;

	return ~used & 0xf;
}
135
 
136
/*
 * Read the sink's maximum link bandwidth code from the cached DPCD and
 * validate it against the codes we understand (1.62/2.7/5.4 Gbps);
 * anything else is warned about and clamped down to 1.62 Gbps.
 */
static int
intel_dp_max_link_bw(struct intel_dp  *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
2327 Serge 154
 
5060 serge 155
/* Usable lane count: the minimum of source port and sink capabilities. */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	/* DDI port A is limited to 2 lanes when DDI_A_4_LANES is not set. */
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	/* Sink's max lane count comes from the cached DPCD. */
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
170
 
2342 Serge 171
/*
172
 * The units on the numbers in the next two are... bizarre.  Examples will
173
 * make it clearer; this one parallels an example in the eDP spec.
174
 *
175
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
176
 *
177
 *     270000 * 1 * 8 / 10 == 216000
178
 *
179
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
180
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
181
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
182
 * 119000.  At 18bpp that's 2142000 kilobits per second.
183
 *
184
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
185
 * get the result in decakilobits instead of kilobits.
186
 */
187
 
2330 Serge 188
/*
 * Data rate a mode requires, in units of 10 kbit/s: pixel clock (kHz,
 * i.e. kilopixels/s) times bits per pixel, divided by 10 rounding up.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kbits_per_sec = pixel_clock * bpp;

	/* round up so we never under-report the needed bandwidth */
	return (kbits_per_sec + 9) / 10;
}
2327 Serge 193
 
2330 Serge 194
/*
 * Usable payload rate of a link: link clock times lane count, scaled by
 * the 8b/10b channel-coding efficiency (8/10).
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw_rate = max_link_clock * max_lanes;

	return raw_rate * 8 / 10;
}
2327 Serge 199
 
4560 Serge 200
/*
 * Connector .mode_valid hook: reject modes that exceed the panel's fixed
 * mode (eDP), the link's bandwidth, or other hardware constraints.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	/* eDP panels can't exceed their fixed native mode. */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		/* the panel is always driven at its native clock */
		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* 18 bpp is the minimum we could fall back to, so check with that */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* double-clocked modes are not supported on DP */
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
237
 
5354 serge 238
/*
 * Pack up to four bytes from @src into one big-endian 32-bit word for the
 * AUX channel data registers; excess bytes beyond four are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t word = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		word |= (uint32_t)src[i] << (8 * (3 - i));
	return word;
}
249
 
6084 serge 250
/*
 * Unpack a big-endian 32-bit AUX data word into up to four bytes at @dst;
 * requests for more than four bytes are clamped.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (8 * (3 - i)));
}
258
 
4560 Serge 259
static void
260
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5354 serge 261
				    struct intel_dp *intel_dp);
4560 Serge 262
static void
263
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5354 serge 264
					      struct intel_dp *intel_dp);
4560 Serge 265
 
5354 serge 266
/*
 * Acquire pps_mutex for this port. A power domain reference must be taken
 * first (and released last, in pps_unlock()) -- see the comment in
 * vlv_power_sequencer_reset() for the lock-ordering rationale.
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
283
 
284
/*
 * Release pps_mutex, then drop the power domain reference taken in
 * pps_lock() -- the put must happen while NOT holding pps_mutex.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
297
 
298
static void
299
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
300
{
301
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
302
	struct drm_device *dev = intel_dig_port->base.base.dev;
303
	struct drm_i915_private *dev_priv = dev->dev_private;
304
	enum pipe pipe = intel_dp->pps_pipe;
6084 serge 305
	bool pll_enabled, release_cl_override = false;
306
	enum dpio_phy phy = DPIO_PHY(pipe);
307
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
5354 serge 308
	uint32_t DP;
309
 
310
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
311
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
312
		 pipe_name(pipe), port_name(intel_dig_port->port)))
313
		return;
314
 
315
	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
316
		      pipe_name(pipe), port_name(intel_dig_port->port));
317
 
318
	/* Preserve the BIOS-computed detected bit. This is
319
	 * supposed to be read-only.
320
	 */
321
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
322
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
323
	DP |= DP_PORT_WIDTH(1);
324
	DP |= DP_LINK_TRAIN_PAT_1;
325
 
326
	if (IS_CHERRYVIEW(dev))
327
		DP |= DP_PIPE_SELECT_CHV(pipe);
328
	else if (pipe == PIPE_B)
329
		DP |= DP_PIPEB_SELECT;
330
 
331
	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
332
 
333
	/*
334
	 * The DPLL for the pipe must be enabled for this to work.
335
	 * So enable temporarily it if it's not already enabled.
336
	 */
6084 serge 337
	if (!pll_enabled) {
338
		release_cl_override = IS_CHERRYVIEW(dev) &&
339
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);
340
 
5354 serge 341
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
342
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
6084 serge 343
	}
5354 serge 344
 
345
	/*
346
	 * Similar magic as in intel_dp_enable_port().
347
	 * We _must_ do this port enable + disable trick
348
	 * to make this power seqeuencer lock onto the port.
349
	 * Otherwise even VDD force bit won't work.
350
	 */
351
	I915_WRITE(intel_dp->output_reg, DP);
352
	POSTING_READ(intel_dp->output_reg);
353
 
354
	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
355
	POSTING_READ(intel_dp->output_reg);
356
 
357
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
358
	POSTING_READ(intel_dp->output_reg);
359
 
6084 serge 360
	if (!pll_enabled) {
5354 serge 361
		vlv_force_pll_off(dev, pipe);
6084 serge 362
 
363
		if (release_cl_override)
364
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
365
	}
5354 serge 366
}
367
 
4560 Serge 368
/*
 * Return the pipe whose panel power sequencer drives this eDP port,
 * assigning (and kicking) a free one if none is assigned yet.
 * Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
431
 
432
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
433
			       enum pipe pipe);
434
 
435
/* vlv_pipe_check: does this pipe's power sequencer report panel power on? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}
440
 
441
/* vlv_pipe_check: does this pipe's power sequencer have VDD forced on? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
446
 
447
/* vlv_pipe_check that accepts any pipe (fallback, no state requirement). */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
452
 
453
/*
 * Find a pipe whose power sequencer is already selected for @port and
 * satisfies @pipe_check; returns INVALID_PIPE when none matches.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
475
 
5354 serge 476
/*
 * At init, try to adopt the power sequencer the BIOS/firmware already set
 * up for this port, preferring one that is actively powering the panel.
 * Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
512
 
513
/*
 * Forget all power sequencer assignments (e.g. after a power-well reset on
 * VLV/CHV) so they get re-picked and re-kicked on next use.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
541
 
4560 Serge 542
/* Panel power CONTROL register for this port's power sequencer. */
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
553
 
554
/* Panel power STATUS register for this port's power sequencer. */
static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
565
 
5354 serge 566
#if 0
567
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
568
   This function only applicable when panel PM state is not to be tracked */
569
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
570
			      void *unused)
571
{
572
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
573
						 edp_notifier);
574
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
575
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 576
 
5354 serge 577
	if (!is_edp(intel_dp) || code != SYS_RESTART)
578
		return 0;
579
 
580
	pps_lock(intel_dp);
581
 
582
	if (IS_VALLEYVIEW(dev)) {
583
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
6084 serge 584
		u32 pp_ctrl_reg, pp_div_reg;
585
		u32 pp_div;
5354 serge 586
 
587
		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
588
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
589
		pp_div = I915_READ(pp_div_reg);
590
		pp_div &= PP_REFERENCE_DIVIDER_MASK;
591
 
592
		/* 0x1F write to PP_DIV_REG sets max cycle delay */
593
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
594
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
595
		msleep(intel_dp->panel_power_cycle_delay);
596
	}
597
 
598
	pps_unlock(intel_dp);
599
 
600
	return 0;
601
}
602
#endif
603
 
5060 serge 604
/*
 * True if the power sequencer reports the panel is powered on.
 * Must be called with pps_mutex held.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* no sequencer assigned yet on VLV/CHV -> can't be powered */
	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
617
 
5060 serge 618
/*
 * True if the power sequencer currently has VDD forced on.
 * Must be called with pps_mutex held.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* no sequencer assigned yet on VLV/CHV -> VDD can't be on */
	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
631
 
632
/*
 * Sanity check before AUX traffic on eDP: warn if neither panel power nor
 * VDD is up, since the AUX transaction would then fail.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
648
 
3480 Serge 649
/*
 * Wait (up to ~10ms, interrupt-driven or by polling) for the AUX channel
 * SEND_BUSY bit to clear and return the final channel status register.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads the status register each time it is evaluated */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
672
 
5060 serge 673
/* AUX clock divider for gen4-ish parts; only one divider (index 0) exists. */
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
684
 
685
/*
 * AUX clock divider for ILK-era parts: port A derives it from cdclk,
 * the PCH ports from the PCH raw clock. Only index 0 is valid.
 */
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
701
 
702
/*
 * AUX clock divider for HSW/BDW: port A uses cdclk; non-ULT HSW with an
 * LPT PCH needs two workaround divider values (63 then 72) to try.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else  {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
723
 
5060 serge 724
/* VLV uses a single fixed AUX clock divider of 100; no retries beyond it. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
728
 
5354 serge 729
/*
 * SKL hardware derives the AUX clock from CDCLK by itself, so no divider
 * needs programming. A single dummy value of 1 is reported to plug into
 * the existing get_aux_clock_divider retry loop.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 1;
}
738
 
5060 serge 739
/*
 * Compose the AUX_CH_CTL register value for a transaction on pre-SKL
 * hardware: busy/done/error bits, timeout, message size, precharge time
 * and bit-clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW port A uses a longer 600us timeout. */
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
768
 
5354 serge 769
/*
 * Compose the AUX_CH_CTL value for SKL+: no clock divider field (the
 * divider argument is unused), fixed 1600us timeout and a 32-pulse sync.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
783
 
2330 Serge 784
/*
 * Perform one raw AUX channel transaction: write @send_bytes from @send,
 * then read up to @recv_size bytes into @recv. Returns the number of bytes
 * received, or a negative errno (-EBUSY, -E2BIG, -EIO, -ETIMEDOUT).
 * Takes pps_lock and temporarily forces VDD on for eDP.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;	/* data registers follow the ctl reg */
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* rate-limit the WARN to one per distinct stuck status */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry over each divider the platform offers (0 terminates). */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:

	/* Drop VDD only if we turned it on above. */
	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
933
 
5060 serge 934
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

/*
 * drm_dp_aux transfer callback: pack a drm_dp_aux_msg into the raw AUX
 * wire format (4-byte header + optional payload), hand it to
 * intel_dp_aux_ch() and decode the reply.
 *
 * Returns the number of payload bytes transferred on success or a
 * negative errno.  msg->reply is filled with the sink's AUX reply
 * nibble on every success path; callers must check it before trusting
 * the data.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* AUX header: request nibble + 20-bit address, then length-1. */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size write == address-only transaction (3-byte header). */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1; /* +1 for the leading reply byte */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1005
 
5060 serge 1006
/*
 * Set up the AUX channel for this DP port: pick the AUX_CTL register,
 * name the channel and register it with the drm_dp_aux helper.
 *
 * NOTE(review): the "connector" parameter is unused in this port;
 * presumably it served sysfs registration upstream — confirm before
 * removing it from the signature.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	/* Per-port AUX_CTL register and DDC bus name. */
	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	/* NOTE(review): second argument stubbed to "" in this port;
	 * upstream presumably prints the connector name here. */
	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
					"");

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}
}
2330 Serge 1089
 
5060 serge 1090
/*
 * Connector unregister hook for DP connectors.
 *
 * The previous version looked up the attached intel_dp into a local
 * that was never used (the upstream sysfs teardown it supported is not
 * present in this port); the dead lookup has been removed.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	intel_connector_unregister(intel_connector);
}
1097
 
5060 serge 1098
static void
6084 serge 1099
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
5354 serge 1100
{
1101
	u32 ctrl1;
1102
 
6084 serge 1103
	memset(&pipe_config->dpll_hw_state, 0,
1104
	       sizeof(pipe_config->dpll_hw_state));
1105
 
5354 serge 1106
	pipe_config->ddi_pll_sel = SKL_DPLL0;
1107
	pipe_config->dpll_hw_state.cfgcr1 = 0;
1108
	pipe_config->dpll_hw_state.cfgcr2 = 0;
1109
 
1110
	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
6084 serge 1111
	switch (pipe_config->port_clock / 2) {
1112
	case 81000:
1113
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5354 serge 1114
					      SKL_DPLL0);
1115
		break;
6084 serge 1116
	case 135000:
1117
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
5354 serge 1118
					      SKL_DPLL0);
1119
		break;
6084 serge 1120
	case 270000:
1121
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
5354 serge 1122
					      SKL_DPLL0);
1123
		break;
6084 serge 1124
	case 162000:
1125
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1126
					      SKL_DPLL0);
1127
		break;
1128
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1129
	results in CDCLK change. Need to handle the change of CDCLK by
1130
	disabling pipes and re-enabling them */
1131
	case 108000:
1132
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1133
					      SKL_DPLL0);
1134
		break;
1135
	case 216000:
1136
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1137
					      SKL_DPLL0);
1138
		break;
1139
 
5354 serge 1140
	}
1141
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1142
}
1143
 
6084 serge 1144
void
1145
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
2330 Serge 1146
{
6084 serge 1147
	memset(&pipe_config->dpll_hw_state, 0,
1148
	       sizeof(pipe_config->dpll_hw_state));
1149
 
1150
	switch (pipe_config->port_clock / 2) {
1151
	case 81000:
5060 serge 1152
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1153
		break;
6084 serge 1154
	case 135000:
5060 serge 1155
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1156
		break;
6084 serge 1157
	case 270000:
5060 serge 1158
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1159
		break;
1160
	}
2330 Serge 1161
}
1162
 
6084 serge 1163
static int
1164
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1165
{
1166
	if (intel_dp->num_sink_rates) {
1167
		*sink_rates = intel_dp->sink_rates;
1168
		return intel_dp->num_sink_rates;
1169
	}
1170
 
1171
	*sink_rates = default_rates;
1172
 
1173
	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1174
}
1175
 
1176
static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1177
{
1178
	/* WaDisableHBR2:skl */
1179
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1180
		return false;
1181
 
1182
	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1183
	    (INTEL_INFO(dev)->gen >= 9))
1184
		return true;
1185
	else
1186
		return false;
1187
}
1188
 
1189
static int
1190
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1191
{
1192
	int size;
1193
 
1194
	if (IS_BROXTON(dev)) {
1195
		*source_rates = bxt_rates;
1196
		size = ARRAY_SIZE(bxt_rates);
1197
	} else if (IS_SKYLAKE(dev)) {
1198
		*source_rates = skl_rates;
1199
		size = ARRAY_SIZE(skl_rates);
1200
	} else {
1201
		*source_rates = default_rates;
1202
		size = ARRAY_SIZE(default_rates);
1203
	}
1204
 
1205
	/* This depends on the fact that 5.4 is last value in the array */
1206
	if (!intel_dp_source_supports_hbr2(dev))
1207
		size--;
1208
 
1209
	return size;
1210
}
1211
 
4104 Serge 1212
static void
1213
intel_dp_set_clock(struct intel_encoder *encoder,
6084 serge 1214
		   struct intel_crtc_state *pipe_config)
4104 Serge 1215
{
1216
	struct drm_device *dev = encoder->base.dev;
4560 Serge 1217
	const struct dp_link_dpll *divisor = NULL;
1218
	int i, count = 0;
4104 Serge 1219
 
1220
	if (IS_G4X(dev)) {
4560 Serge 1221
		divisor = gen4_dpll;
1222
		count = ARRAY_SIZE(gen4_dpll);
4104 Serge 1223
	} else if (HAS_PCH_SPLIT(dev)) {
4560 Serge 1224
		divisor = pch_dpll;
1225
		count = ARRAY_SIZE(pch_dpll);
5060 serge 1226
	} else if (IS_CHERRYVIEW(dev)) {
1227
		divisor = chv_dpll;
1228
		count = ARRAY_SIZE(chv_dpll);
4560 Serge 1229
	} else if (IS_VALLEYVIEW(dev)) {
1230
		divisor = vlv_dpll;
1231
		count = ARRAY_SIZE(vlv_dpll);
5060 serge 1232
	}
4560 Serge 1233
 
1234
	if (divisor && count) {
1235
		for (i = 0; i < count; i++) {
6084 serge 1236
			if (pipe_config->port_clock == divisor[i].clock) {
4560 Serge 1237
				pipe_config->dpll = divisor[i].dpll;
5060 serge 1238
				pipe_config->clock_set = true;
4560 Serge 1239
				break;
1240
			}
1241
		}
4104 Serge 1242
	}
1243
}
1244
 
6084 serge 1245
static int intersect_rates(const int *source_rates, int source_len,
1246
			   const int *sink_rates, int sink_len,
1247
			   int *common_rates)
1248
{
1249
	int i = 0, j = 0, k = 0;
1250
 
1251
	while (i < source_len && j < sink_len) {
1252
		if (source_rates[i] == sink_rates[j]) {
1253
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1254
				return k;
1255
			common_rates[k] = source_rates[i];
1256
			++k;
1257
			++i;
1258
			++j;
1259
		} else if (source_rates[i] < sink_rates[j]) {
1260
			++i;
1261
		} else {
1262
			++j;
1263
		}
1264
	}
1265
	return k;
1266
}
1267
 
1268
/*
 * Fill "common_rates" with the intersection of the source's and the
 * sink's supported link rates; returns the number of entries written.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
1282
 
1283
/*
 * Format "nelem" integers from "array" into "str" as a comma-separated
 * list ("162000, 270000, ...").  The output is always NUL-terminated
 * (when len > 0) and never exceeds "len" bytes; formatting stops early
 * on truncation or encoding error.
 *
 * Fixes over the previous version: a zero "len" no longer writes the
 * terminating NUL out of bounds, and a negative snprintf() return is
 * handled explicitly instead of relying on the implicit signed ->
 * unsigned conversion in the "r >= len" comparison.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	if (len == 0)
		return;	/* no room even for the terminating NUL */

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		/* Stop on encoding error or truncation. */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1298
 
1299
/*
 * Dump the source, sink and common link-rate tables to the KMS debug
 * log.  Returns immediately unless DRM_UT_KMS debugging is enabled,
 * so the string building is only paid for when someone is looking.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1322
 
1323
static int rate_to_index(int find, const int *rates)
1324
{
1325
	int i = 0;
1326
 
1327
	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1328
		if (find == rates[i])
1329
			break;
1330
 
1331
	return i;
1332
}
1333
 
1334
/*
 * Highest link rate supported by both source and sink.  Falls back to
 * 162000 (RBR) if the common set is unexpectedly empty.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	/* rate_to_index(0, ...) finds the first zero entry of the
	 * zero-initialized table, i.e. the count of valid rates; the
	 * entry before it is the maximum, since intersect_rates()
	 * builds the table in ascending order. */
	return rates[rate_to_index(0, rates) - 1];
}
1346
 
1347
/*
 * Index of "rate" within the sink's advertised rate table (used as the
 * rate-select value by intel_dp_compute_rate()).
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1351
 
1352
/*
 * Translate a port clock into the pair of values written to the sink:
 * sinks without an explicit rate table get the classic link-bw code
 * (rate_select stays 0); sinks with one get a rate-select index
 * (link_bw stays 0).
 */
static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
				  uint8_t *link_bw, uint8_t *rate_select)
{
	if (!intel_dp->num_sink_rates) {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	} else {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	}
}
1364
 
3243 Serge 1365
/*
 * Mode-set compute stage for DP/eDP: choose bpp, lane count and link
 * rate that fit the requested mode, then fill in the m/n values, DRRS
 * state and PLL selection in pipe_config.
 *
 * Returns true when a workable configuration was found, false when the
 * mode cannot be driven (double-clock modes, or no bpp/lane/rate combo
 * provides enough bandwidth).
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	/* Port A (eDP) never carries audio. */
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		/* eDP panels drive their fixed native mode with panel
		 * fitting for everything else. */
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config);
			/* NOTE(review): "ret" is an errno-style int but
			 * this function returns bool, so a failure here
			 * converts to "true" — confirm against callers. */
			if (ret)
				return ret;
		}

		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	/* Search from highest bpp down, and within each bpp from lowest
	 * link rate/lane count up, for the first combination with
	 * enough bandwidth. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
	} else {
		pipe_config->limited_color_range =
			intel_dp->limited_color_range;
	}

	pipe_config->lane_count = lane_count;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
			      &link_bw, &rate_select);

	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
		      link_bw, rate_select, pipe_config->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Second set of m/n values for the DRRS downclocked mode. */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	/* Platform-specific PLL selection for the chosen link rate. */
	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config);
	else
		intel_dp_set_clock(encoder, pipe_config);

	return true;
}
1528
 
4104 Serge 1529
/*
 * Program the eDP PLL frequency select bits in DP_A for CPU eDP,
 * based on the crtc's negotiated port clock, then wait 500us for the
 * write to settle.  The chosen bits are mirrored into intel_dp->DP.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}
1559
 
6084 serge 1560
/*
 * Cache the negotiated link parameters (rate and lane count) from the
 * crtc state into the intel_dp struct.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1566
 
5060 serge 1567
/*
 * Build the DP port register value (intel_dp->DP) for the upcoming
 * enable, handling the per-generation register layout differences
 * described in the comment below.  Only intel_dp->DP (and, on CPT, the
 * TRANS_DP_CTL framing bit) is written here; the port itself is not
 * yet enabled.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: sync polarity and pipe select live here. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT PCH ports: enhanced framing moved to TRANS_DP_CTL. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU / VLV-CHV layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
2327 Serge 1651
 
5060 serge 1652
/* PP_STATUS mask/value pairs for the panel power sequencer states the
 * wait_panel_* helpers poll for. */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

/*
 * Poll the panel power status register until (status & mask) == value.
 * Times out after 5000 (ms, per _wait_for()'s convention — confirm),
 * in which case only an error is logged; the caller is not notified.
 * Requires pps_mutex to be held.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1687
 
5060 serge 1688
/* Wait until the power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

/* Wait until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

/* Wait out the panel's mandatory power cycle delay before powering on
 * again. */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

/* Honor the panel's power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

/* Honor the panel's backlight-off -> power-off delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
2342 Serge 1724
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/* Broxton has no register-unlock key, so only patch the unlock
	 * bits in on the other platforms. */
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1743
 
5354 serge 1744
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on so the AUX channel can be used before the panel
 * is fully powered.  Returns true when this call actually requested
 * VDD (i.e. want_panel_vdd was not already set) and the caller is
 * therefore responsible for the matching disable; false otherwise
 * (including for non-eDP ports, where this is a no-op).
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* Stop any pending deferred VDD-off before claiming VDD. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* Already on in hardware: nothing more to program. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
2327 Serge 1801
 
5354 serge 1802
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	/* No-op on non-eDP ports. */
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	/* edp_panel_vdd_on() returning false here means VDD was already
	 * requested, which violates the no-nesting contract above. */
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
	     port_name(dp_to_dig_port(intel_dp)->port));
}
1823
 
1824
/*
 * Synchronously drop the forced VDD on the eDP panel: clear EDP_FORCE_VDD in
 * the panel power control register and release the AUX power domain reference
 * taken when VDD was forced on. Caller must hold pps_mutex and must have
 * already cleared want_panel_vdd.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Callers must have dropped their VDD request first. */
	WARN_ON(intel_dp->want_panel_vdd);

	/* Nothing to do if VDD is not currently forced on. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/*
	 * If the panel itself was off too, removing VDD starts a power
	 * cycle; record the timestamp so wait_panel_power_cycle() can
	 * honour the panel's required off time.
	 */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	/* Drop the power-domain reference taken in edp_panel_vdd_on(). */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2327 Serge 1864
 
5060 serge 1865
static void edp_panel_vdd_work(struct work_struct *__work)
3243 Serge 1866
{
3482 Serge 1867
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1868
						 struct intel_dp, panel_vdd_work);
1869
 
5354 serge 1870
	pps_lock(intel_dp);
1871
	if (!intel_dp->want_panel_vdd)
6084 serge 1872
		edp_panel_vdd_off_sync(intel_dp);
5354 serge 1873
	pps_unlock(intel_dp);
3243 Serge 1874
}
2342 Serge 1875
 
5060 serge 1876
/*
 * Schedule the asynchronous VDD off via panel_vdd_work after a generous
 * delay, so VDD stays up across a burst of AUX operations.
 */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	/*
	 * NOTE(port): scheduling is disabled in this KolibriOS port, so the
	 * deferred VDD off never fires; 'delay' is computed but unused and
	 * VDD is only dropped via the synchronous path. TODO confirm this is
	 * intentional for the port.
	 */
//   schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
1888
 
5354 serge 1889
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * @sync: if true, drop VDD immediately; otherwise defer it via the
 *        delayed work so back-to-back AUX traffic keeps VDD up.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Unbalanced off: VDD was never requested on. */
	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
	     port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
1914
 
5354 serge 1915
/*
 * Turn the eDP panel power on via the panel power sequencer. Caller must
 * hold pps_mutex. No-op (with a WARN) if power is already on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Respect the panel's required power-cycle (off) time first. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Block until the sequencer reports the panel as powered up. */
	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2327 Serge 1962
 
5354 serge 1963
/* Public wrapper: power the eDP panel on under the pps lock. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_on(intel_dp);
		pps_unlock(intel_dp);
	}
}
1972
 
1973
 
1974
/*
 * Turn the eDP panel power off via the panel power sequencer, dropping
 * forced VDD at the same time. Caller must hold pps_mutex and must still
 * have a VDD request outstanding (we release its power-domain reference
 * here).
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD is dropped as part of this write, so clear the request flag. */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Record power-cycle start and wait for the sequencer to finish. */
	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
2327 Serge 2015
 
5354 serge 2016
/* Public wrapper: power the eDP panel off under the pps lock. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		edp_panel_off(intel_dp);
		pps_unlock(intel_dp);
	}
}
2025
 
2026
/* Enable backlight in the panel power control. */
2027
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2028
{
3243 Serge 2029
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2030
	struct drm_device *dev = intel_dig_port->base.base.dev;
2330 Serge 2031
	struct drm_i915_private *dev_priv = dev->dev_private;
2032
	u32 pp;
3746 Serge 2033
	u32 pp_ctrl_reg;
2327 Serge 2034
 
2330 Serge 2035
	/*
2036
	 * If we enable the backlight right away following a panel power
2037
	 * on, we may see slight flicker as the panel syncs with the eDP
2038
	 * link.  So delay a bit to make sure the image is solid before
2039
	 * allowing it to appear.
2040
	 */
5060 serge 2041
	wait_backlight_on(intel_dp);
5354 serge 2042
 
2043
	pps_lock(intel_dp);
2044
 
3746 Serge 2045
	pp = ironlake_get_pp_control(intel_dp);
2330 Serge 2046
	pp |= EDP_BLC_ENABLE;
3243 Serge 2047
 
4560 Serge 2048
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3746 Serge 2049
 
2050
	I915_WRITE(pp_ctrl_reg, pp);
2051
	POSTING_READ(pp_ctrl_reg);
5354 serge 2052
 
2053
	pps_unlock(intel_dp);
2330 Serge 2054
}
2327 Serge 2055
 
5354 serge 2056
/* Enable backlight PWM and backlight PP control. */
2057
void intel_edp_backlight_on(struct intel_dp *intel_dp)
2330 Serge 2058
{
5354 serge 2059
	if (!is_edp(intel_dp))
2060
		return;
2061
 
2062
	DRM_DEBUG_KMS("\n");
2063
 
2064
	intel_panel_enable_backlight(intel_dp->attached_connector);
2065
	_intel_edp_backlight_on(intel_dp);
2066
}
2067
 
2068
/* Disable backlight in the panel power control. */
2069
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2070
{
3243 Serge 2071
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2330 Serge 2072
	struct drm_i915_private *dev_priv = dev->dev_private;
2073
	u32 pp;
3746 Serge 2074
	u32 pp_ctrl_reg;
2327 Serge 2075
 
2342 Serge 2076
	if (!is_edp(intel_dp))
2077
		return;
2078
 
5354 serge 2079
	pps_lock(intel_dp);
2080
 
3746 Serge 2081
	pp = ironlake_get_pp_control(intel_dp);
2330 Serge 2082
	pp &= ~EDP_BLC_ENABLE;
3746 Serge 2083
 
4560 Serge 2084
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3746 Serge 2085
 
2086
	I915_WRITE(pp_ctrl_reg, pp);
2087
	POSTING_READ(pp_ctrl_reg);
5354 serge 2088
 
2089
	pps_unlock(intel_dp);
2090
 
5060 serge 2091
	intel_dp->last_backlight_off = jiffies;
2092
	edp_wait_backlight_off(intel_dp);
5354 serge 2093
}
5060 serge 2094
 
5354 serge 2095
/* Disable backlight PP control and backlight PWM. */
2096
void intel_edp_backlight_off(struct intel_dp *intel_dp)
2097
{
2098
	if (!is_edp(intel_dp))
2099
		return;
2100
 
2101
	DRM_DEBUG_KMS("\n");
2102
 
2103
	_intel_edp_backlight_off(intel_dp);
5060 serge 2104
	intel_panel_disable_backlight(intel_dp->attached_connector);
2330 Serge 2105
}
2327 Serge 2106
 
5354 serge 2107
/*
2108
 * Hook for controlling the panel power control backlight through the bl_power
2109
 * sysfs attribute. Take care to handle multiple calls.
2110
 */
2111
static void intel_edp_backlight_power(struct intel_connector *connector,
2112
				      bool enable)
2113
{
2114
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2115
	bool is_enabled;
2116
 
2117
	pps_lock(intel_dp);
2118
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2119
	pps_unlock(intel_dp);
2120
 
2121
	if (is_enabled == enable)
2122
		return;
2123
 
2124
	DRM_DEBUG_KMS("panel power control backlight %s\n",
2125
		      enable ? "enable" : "disable");
2126
 
2127
	if (enable)
2128
		_intel_edp_backlight_on(intel_dp);
2129
	else
2130
		_intel_edp_backlight_off(intel_dp);
2131
}
2132
 
3031 serge 2133
/*
 * Enable the CPU eDP PLL via DP_A. The pipe must be disabled and both the
 * PLL and the port must currently be off (WARNed otherwise).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* allow the PLL to settle before it is used */
	udelay(200);
}
2327 Serge 2158
 
3031 serge 2159
/*
 * Disable the CPU eDP PLL via DP_A. The pipe must be disabled, the PLL must
 * currently be on, and the port must already be off (WARNed otherwise).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	/* let the hardware settle after turning the PLL off */
	udelay(200);
}
2327 Serge 2183
 
2330 Serge 2184
/* If the sink supports it, try to set the power state appropriately */
3243 Serge 2185
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2330 Serge 2186
{
2187
	int ret, i;
2327 Serge 2188
 
2330 Serge 2189
	/* Should have a valid DPCD by this point */
2190
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2191
		return;
2327 Serge 2192
 
2330 Serge 2193
	if (mode != DRM_MODE_DPMS_ON) {
5060 serge 2194
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
6084 serge 2195
					 DP_SET_POWER_D3);
2330 Serge 2196
	} else {
2197
		/*
2198
		 * When turning on, we need to retry for 1ms to give the sink
2199
		 * time to wake up.
2200
		 */
2201
		for (i = 0; i < 3; i++) {
5060 serge 2202
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
6084 serge 2203
						 DP_SET_POWER_D0);
2330 Serge 2204
			if (ret == 1)
2205
				break;
2206
			msleep(1);
2207
		}
2208
	}
5354 serge 2209
 
2210
	if (ret != 1)
2211
		DRM_DEBUG_KMS("failed to %s sink power state\n",
2212
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2330 Serge 2213
}
2327 Serge 2214
 
3031 serge 2215
/*
 * Read out whether this DP port is enabled in hardware and, if so, which
 * pipe it is driving. Returns false when the port's power domain is off or
 * the port-enable bit is clear.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	/* The pipe encoding in the port register differs per platform. */
	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/*
		 * On CPT the pipe<->port routing lives in the transcoder
		 * registers, so scan them for the one selecting this port.
		 */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	return true;
}
2327 Serge 2257
 
4104 Serge 2258
/*
 * Read the current hardware state of this DP encoder into @pipe_config:
 * audio enable, sync polarities, color range, lane count, link M/N values,
 * port clock and the derived dotclock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/*
	 * Sync polarities live in the transcoder register on CPT PCH ports,
	 * in the port register everywhere else.
	 */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A's link rate is encoded in the DP_A PLL frequency field. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2345
 
3031 serge 2346
/*
 * Encoder disable hook: shut down audio/PSR, then the eDP backlight, sink
 * power state and panel, in that order. The order matters - see the inline
 * comments.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2330 Serge 2369
 
5354 serge 2370
static void ilk_post_disable_dp(struct intel_encoder *encoder)
3031 serge 2371
{
2372
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4104 Serge 2373
	enum port port = dp_to_dig_port(intel_dp)->port;
5060 serge 2374
 
2375
	intel_dp_link_down(intel_dp);
5354 serge 2376
	if (port == PORT_A)
6084 serge 2377
		ironlake_edp_pll_off(intel_dp);
5060 serge 2378
}
2379
 
2380
static void vlv_post_disable_dp(struct intel_encoder *encoder)
2381
{
2382
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2383
 
2384
	intel_dp_link_down(intel_dp);
2385
}
2386
 
6084 serge 2387
/*
 * Assert (@reset = true) or deassert (@reset = false) the DPIO data-lane
 * and clock soft resets for this encoder's PCS channel on CHV. Lanes 2/3
 * (the PCS23 registers) are only touched when more than two lanes are in
 * use.
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	/* Lane resets for lanes 0/1. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	/* Lane resets for lanes 2/3, when used. */
	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	/* Clock soft reset for lanes 0/1. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	/* Clock soft reset for lanes 2/3, when used. */
	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
5060 serge 2430
 
6084 serge 2431
static void chv_post_disable_dp(struct intel_encoder *encoder)
2432
{
2433
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2434
	struct drm_device *dev = encoder->base.dev;
2435
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 2436
 
6084 serge 2437
	intel_dp_link_down(intel_dp);
5060 serge 2438
 
6084 serge 2439
	mutex_lock(&dev_priv->sb_lock);
2440
 
2441
	/* Assert data lane reset */
2442
	chv_data_lane_soft_reset(encoder, true);
2443
 
2444
	mutex_unlock(&dev_priv->sb_lock);
2330 Serge 2445
}
2446
 
5354 serge 2447
/*
 * Encode the requested DP training pattern into the port control value.
 * On DDI platforms the pattern is written directly to DP_TP_CTL; on CPT-style
 * and gmch-style platforms only *DP is updated and the caller writes the
 * port register. Pattern 3 is only supported on DDI and CHV.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT-style link training bit layout. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* fall back to pattern 2 on hardware without pattern 3 */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		/* gmch-style layout; CHV has a wider mask for pattern 3. */
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2530
 
2531
/*
 * Program the DP port register and enable the port using the two-step
 * write sequence required on VLV/CHV (see the inline comment).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2554
 
3031 serge 2555
/*
 * Encoder enable hook: bring up the port, panel power (under the pps lock),
 * sink power state, link training and, if configured, audio - in that
 * order.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* The port must still be disabled at this point. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* On VLV/CHV bind a power sequencer to this port first. */
	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	/* Panel power up needs VDD forced on around it. */
	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
2599
 
2600
static void g4x_enable_dp(struct intel_encoder *encoder)
2601
{
2602
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2603
 
2604
	intel_enable_dp(encoder);
5060 serge 2605
	intel_edp_backlight_on(intel_dp);
2330 Serge 2606
}
2607
 
4104 Serge 2608
static void vlv_enable_dp(struct intel_encoder *encoder)
2609
{
4560 Serge 2610
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2611
 
5060 serge 2612
	intel_edp_backlight_on(intel_dp);
6084 serge 2613
	intel_psr_enable(intel_dp);
4104 Serge 2614
}
2615
 
4560 Serge 2616
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
3031 serge 2617
{
2618
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4104 Serge 2619
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3031 serge 2620
 
5060 serge 2621
	intel_dp_prepare(encoder);
2622
 
2623
	/* Only ilk+ has port A */
2624
	if (dport->port == PORT_A) {
2625
		ironlake_set_pll_cpu_edp(intel_dp);
3031 serge 2626
		ironlake_edp_pll_on(intel_dp);
5060 serge 2627
	}
3031 serge 2628
}
2629
 
5354 serge 2630
/*
 * Logically disconnect this port's current power sequencer: drop any forced
 * VDD, clear the sequencer's port select, and mark pps_pipe invalid.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2655
 
2656
/*
 * Take the power sequencer of @pipe away from whichever eDP port currently
 * owns it, detaching it safely (VDD off first). Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* VLV/CHV only have power sequencers on pipes A and B. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* Stealing from an actively-used port indicates a bug. */
		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2692
 
2693
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2694
{
2695
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2696
	struct intel_encoder *encoder = &intel_dig_port->base;
2697
	struct drm_device *dev = encoder->base.dev;
2698
	struct drm_i915_private *dev_priv = dev->dev_private;
2699
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2700
 
2701
	lockdep_assert_held(&dev_priv->pps_mutex);
2702
 
2703
	if (!is_edp(intel_dp))
2704
		return;
2705
 
2706
	if (intel_dp->pps_pipe == crtc->pipe)
2707
		return;
2708
 
2709
	/*
2710
	 * If another power sequencer was being used on this
2711
	 * port previously make sure to turn off vdd there while
2712
	 * we still have control of it.
2713
	 */
2714
	if (intel_dp->pps_pipe != INVALID_PIPE)
2715
		vlv_detach_power_sequencer(intel_dp);
2716
 
2717
	/*
2718
	 * We may be stealing the power
2719
	 * sequencer from another port.
2720
	 */
2721
	vlv_steal_power_sequencer(dev, crtc->pipe);
2722
 
2723
	/* now it's all ours */
2724
	intel_dp->pps_pipe = crtc->pipe;
2725
 
2726
	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2727
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2728
 
2729
	/* init power sequencer on this pipe and port */
2730
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2731
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2732
}
2733
 
4104 Serge 2734
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2735
{
2736
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2737
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2738
	struct drm_device *dev = encoder->base.dev;
2739
	struct drm_i915_private *dev_priv = dev->dev_private;
2740
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
4560 Serge 2741
	enum dpio_channel port = vlv_dport_to_channel(dport);
6084 serge 2742
	int pipe = intel_crtc->pipe;
2743
	u32 val;
4104 Serge 2744
 
6084 serge 2745
	mutex_lock(&dev_priv->sb_lock);
4104 Serge 2746
 
4560 Serge 2747
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
6084 serge 2748
	val = 0;
2749
	if (pipe)
2750
		val |= (1<<21);
2751
	else
2752
		val &= ~(1<<21);
2753
	val |= 0x001000c4;
4560 Serge 2754
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2755
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2756
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
4104 Serge 2757
 
6084 serge 2758
	mutex_unlock(&dev_priv->sb_lock);
4104 Serge 2759
 
2760
	intel_enable_dp(encoder);
4539 Serge 2761
}
4104 Serge 2762
 
4560 Serge 2763
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
4104 Serge 2764
{
2765
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2766
	struct drm_device *dev = encoder->base.dev;
2767
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 2768
	struct intel_crtc *intel_crtc =
2769
		to_intel_crtc(encoder->base.crtc);
2770
	enum dpio_channel port = vlv_dport_to_channel(dport);
2771
	int pipe = intel_crtc->pipe;
4104 Serge 2772
 
5060 serge 2773
	intel_dp_prepare(encoder);
2774
 
4104 Serge 2775
	/* Program Tx lane resets to default */
6084 serge 2776
	mutex_lock(&dev_priv->sb_lock);
4560 Serge 2777
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
4104 Serge 2778
			 DPIO_PCS_TX_LANE2_RESET |
2779
			 DPIO_PCS_TX_LANE1_RESET);
4560 Serge 2780
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
4104 Serge 2781
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2782
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2783
			 (1<
2784
				 DPIO_PCS_CLK_SOFT_RESET);
2785
 
2786
	/* Fix up inter-pair skew failure */
4560 Serge 2787
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2788
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2789
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
6084 serge 2790
	mutex_unlock(&dev_priv->sb_lock);
4104 Serge 2791
}
2792
 
5060 serge 2793
static void chv_pre_enable_dp(struct intel_encoder *encoder)
2794
{
2795
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2796
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2797
	struct drm_device *dev = encoder->base.dev;
2798
	struct drm_i915_private *dev_priv = dev->dev_private;
2799
	struct intel_crtc *intel_crtc =
2800
		to_intel_crtc(encoder->base.crtc);
2801
	enum dpio_channel ch = vlv_dport_to_channel(dport);
2802
	int pipe = intel_crtc->pipe;
6084 serge 2803
	int data, i, stagger;
5060 serge 2804
	u32 val;
2805
 
6084 serge 2806
	mutex_lock(&dev_priv->sb_lock);
5060 serge 2807
 
5354 serge 2808
	/* allow hardware to manage TX FIFO reset source */
2809
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2810
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2811
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2812
 
6084 serge 2813
	if (intel_crtc->config->lane_count > 2) {
2814
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2815
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2816
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2817
	}
5354 serge 2818
 
5060 serge 2819
	/* Program Tx lane latency optimal setting*/
6084 serge 2820
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
5060 serge 2821
		/* Set the upar bit */
6084 serge 2822
		if (intel_crtc->config->lane_count == 1)
2823
			data = 0x0;
2824
		else
2825
			data = (i == 1) ? 0x0 : 0x1;
5060 serge 2826
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2827
				data << DPIO_UPAR_SHIFT);
2828
	}
2829
 
2830
	/* Data lane stagger programming */
6084 serge 2831
	if (intel_crtc->config->port_clock > 270000)
2832
		stagger = 0x18;
2833
	else if (intel_crtc->config->port_clock > 135000)
2834
		stagger = 0xd;
2835
	else if (intel_crtc->config->port_clock > 67500)
2836
		stagger = 0x7;
2837
	else if (intel_crtc->config->port_clock > 33750)
2838
		stagger = 0x4;
2839
	else
2840
		stagger = 0x2;
5060 serge 2841
 
6084 serge 2842
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2843
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
2844
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
5060 serge 2845
 
6084 serge 2846
	if (intel_crtc->config->lane_count > 2) {
2847
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2848
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
2849
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2850
	}
2851
 
2852
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2853
		       DPIO_LANESTAGGER_STRAP(stagger) |
2854
		       DPIO_LANESTAGGER_STRAP_OVRD |
2855
		       DPIO_TX1_STAGGER_MASK(0x1f) |
2856
		       DPIO_TX1_STAGGER_MULT(6) |
2857
		       DPIO_TX2_STAGGER_MULT(0));
2858
 
2859
	if (intel_crtc->config->lane_count > 2) {
2860
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2861
			       DPIO_LANESTAGGER_STRAP(stagger) |
2862
			       DPIO_LANESTAGGER_STRAP_OVRD |
2863
			       DPIO_TX1_STAGGER_MASK(0x1f) |
2864
			       DPIO_TX1_STAGGER_MULT(7) |
2865
			       DPIO_TX2_STAGGER_MULT(5));
2866
	}
2867
 
2868
	/* Deassert data lane reset */
2869
	chv_data_lane_soft_reset(encoder, false);
2870
 
2871
	mutex_unlock(&dev_priv->sb_lock);
2872
 
5060 serge 2873
	intel_enable_dp(encoder);
6084 serge 2874
 
2875
	/* Second common lane will stay alive on its own now */
2876
	if (dport->release_cl2_override) {
2877
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2878
		dport->release_cl2_override = false;
2879
	}
5060 serge 2880
}
2881
 
2882
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2883
{
2884
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2885
	struct drm_device *dev = encoder->base.dev;
2886
	struct drm_i915_private *dev_priv = dev->dev_private;
2887
	struct intel_crtc *intel_crtc =
2888
		to_intel_crtc(encoder->base.crtc);
2889
	enum dpio_channel ch = vlv_dport_to_channel(dport);
2890
	enum pipe pipe = intel_crtc->pipe;
6084 serge 2891
	unsigned int lane_mask =
2892
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
5060 serge 2893
	u32 val;
2894
 
5354 serge 2895
	intel_dp_prepare(encoder);
2896
 
6084 serge 2897
	/*
2898
	 * Must trick the second common lane into life.
2899
	 * Otherwise we can't even access the PLL.
2900
	 */
2901
	if (ch == DPIO_CH0 && pipe == PIPE_B)
2902
		dport->release_cl2_override =
2903
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
5060 serge 2904
 
6084 serge 2905
	chv_phy_powergate_lanes(encoder, true, lane_mask);
2906
 
2907
	mutex_lock(&dev_priv->sb_lock);
2908
 
2909
	/* Assert data lane reset */
2910
	chv_data_lane_soft_reset(encoder, true);
2911
 
5060 serge 2912
	/* program left/right clock distribution */
2913
	if (pipe != PIPE_B) {
2914
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2915
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2916
		if (ch == DPIO_CH0)
2917
			val |= CHV_BUFLEFTENA1_FORCE;
2918
		if (ch == DPIO_CH1)
2919
			val |= CHV_BUFRIGHTENA1_FORCE;
2920
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2921
	} else {
2922
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2923
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2924
		if (ch == DPIO_CH0)
2925
			val |= CHV_BUFLEFTENA2_FORCE;
2926
		if (ch == DPIO_CH1)
2927
			val |= CHV_BUFRIGHTENA2_FORCE;
2928
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2929
	}
2930
 
2931
	/* program clock channel usage */
2932
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2933
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2934
	if (pipe != PIPE_B)
2935
		val &= ~CHV_PCS_USEDCLKCHANNEL;
2936
	else
2937
		val |= CHV_PCS_USEDCLKCHANNEL;
2938
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2939
 
6084 serge 2940
	if (intel_crtc->config->lane_count > 2) {
2941
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2942
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2943
		if (pipe != PIPE_B)
2944
			val &= ~CHV_PCS_USEDCLKCHANNEL;
2945
		else
2946
			val |= CHV_PCS_USEDCLKCHANNEL;
2947
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2948
	}
5060 serge 2949
 
2950
	/*
2951
	 * This a a bit weird since generally CL
2952
	 * matches the pipe, but here we need to
2953
	 * pick the CL based on the port.
2954
	 */
2955
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2956
	if (pipe != PIPE_B)
2957
		val &= ~CHV_CMN_USEDCLKCHANNEL;
2958
	else
2959
		val |= CHV_CMN_USEDCLKCHANNEL;
2960
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2961
 
6084 serge 2962
	mutex_unlock(&dev_priv->sb_lock);
5060 serge 2963
}
2964
 
6084 serge 2965
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2966
{
2967
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2968
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2969
	u32 val;
2970
 
2971
	mutex_lock(&dev_priv->sb_lock);
2972
 
2973
	/* disable left/right clock distribution */
2974
	if (pipe != PIPE_B) {
2975
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2976
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2977
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2978
	} else {
2979
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2980
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2981
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2982
	}
2983
 
2984
	mutex_unlock(&dev_priv->sb_lock);
2985
 
2986
	/*
2987
	 * Leave the power down bit cleared for at least one
2988
	 * lane so that chv_powergate_phy_ch() will power
2989
	 * on something when the channel is otherwise unused.
2990
	 * When the port is off and the override is removed
2991
	 * the lanes power down anyway, so otherwise it doesn't
2992
	 * really matter what the state of power down bits is
2993
	 * after this.
2994
	 */
2995
	chv_phy_powergate_lanes(encoder, false, 0x0);
2996
}
2997
 
2330 Serge 2998
/*
2999
 * Native read with retry for link status and receiver capability reads for
3000
 * cases where the sink may still be asleep.
5060 serge 3001
 *
3002
 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3003
 * supposed to retry 3 times per the spec.
2330 Serge 3004
 */
5060 serge 3005
static ssize_t
3006
intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3007
			void *buffer, size_t size)
2330 Serge 3008
{
5060 serge 3009
	ssize_t ret;
3010
	int i;
2330 Serge 3011
 
5354 serge 3012
	/*
3013
	 * Sometime we just get the same incorrect byte repeated
3014
	 * over the entire buffer. Doing just one throw away read
3015
	 * initially seems to "solve" it.
3016
	 */
3017
	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3018
 
2330 Serge 3019
	for (i = 0; i < 3; i++) {
5060 serge 3020
		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3021
		if (ret == size)
3022
			return ret;
2330 Serge 3023
		msleep(1);
3024
	}
3025
 
5060 serge 3026
	return ret;
2330 Serge 3027
}
3028
 
3029
/*
3030
 * Fetch AUX CH registers 0x202 - 0x207 which contain
3031
 * link status information
3032
 */
3033
static bool
2342 Serge 3034
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2330 Serge 3035
{
5060 serge 3036
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
6084 serge 3037
				       DP_LANE0_1_STATUS,
3038
				       link_status,
5060 serge 3039
				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2330 Serge 3040
}
3041
 
5060 serge 3042
/* These are source-specific values. */
2330 Serge 3043
static uint8_t
2342 Serge 3044
intel_dp_voltage_max(struct intel_dp *intel_dp)
2330 Serge 3045
{
3243 Serge 3046
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
6084 serge 3047
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3048
	enum port port = dp_to_dig_port(intel_dp)->port;
2342 Serge 3049
 
6084 serge 3050
	if (IS_BROXTON(dev))
3051
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3052
	else if (INTEL_INFO(dev)->gen >= 9) {
3053
		if (dev_priv->edp_low_vswing && port == PORT_A)
3054
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5354 serge 3055
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
6084 serge 3056
	} else if (IS_VALLEYVIEW(dev))
5354 serge 3057
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
4104 Serge 3058
	else if (IS_GEN7(dev) && port == PORT_A)
5354 serge 3059
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
4104 Serge 3060
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
5354 serge 3061
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2342 Serge 3062
	else
5354 serge 3063
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2342 Serge 3064
}
3065
 
3066
static uint8_t
3067
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3068
{
3243 Serge 3069
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4104 Serge 3070
	enum port port = dp_to_dig_port(intel_dp)->port;
2342 Serge 3071
 
5354 serge 3072
	if (INTEL_INFO(dev)->gen >= 9) {
2342 Serge 3073
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3074
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3075
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3076
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3077
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3078
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3079
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
6084 serge 3080
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3081
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3243 Serge 3082
		default:
5354 serge 3083
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3243 Serge 3084
		}
5354 serge 3085
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3086
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3087
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3088
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3089
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3090
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3091
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3092
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3093
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3094
		default:
3095
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3096
		}
4104 Serge 3097
	} else if (IS_VALLEYVIEW(dev)) {
3243 Serge 3098
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3099
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3100
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3101
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3102
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3103
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3104
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3105
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
4104 Serge 3106
		default:
5354 serge 3107
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
4104 Serge 3108
		}
3109
	} else if (IS_GEN7(dev) && port == PORT_A) {
3110
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3111
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3112
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3113
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3114
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3115
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
2342 Serge 3116
		default:
5354 serge 3117
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
2342 Serge 3118
		}
3119
	} else {
6084 serge 3120
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3121
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3122
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3123
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3124
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3125
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3126
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3127
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
6084 serge 3128
		default:
5354 serge 3129
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
6084 serge 3130
		}
2330 Serge 3131
	}
3132
}
3133
 
6084 serge 3134
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
4104 Serge 3135
{
3136
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3137
	struct drm_i915_private *dev_priv = dev->dev_private;
3138
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
4560 Serge 3139
	struct intel_crtc *intel_crtc =
3140
		to_intel_crtc(dport->base.base.crtc);
4104 Serge 3141
	unsigned long demph_reg_value, preemph_reg_value,
3142
		uniqtranscale_reg_value;
3143
	uint8_t train_set = intel_dp->train_set[0];
4560 Serge 3144
	enum dpio_channel port = vlv_dport_to_channel(dport);
3145
	int pipe = intel_crtc->pipe;
4104 Serge 3146
 
3147
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
5354 serge 3148
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
4104 Serge 3149
		preemph_reg_value = 0x0004000;
3150
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3151
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4104 Serge 3152
			demph_reg_value = 0x2B405555;
3153
			uniqtranscale_reg_value = 0x552AB83A;
3154
			break;
5354 serge 3155
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4104 Serge 3156
			demph_reg_value = 0x2B404040;
3157
			uniqtranscale_reg_value = 0x5548B83A;
3158
			break;
5354 serge 3159
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4104 Serge 3160
			demph_reg_value = 0x2B245555;
3161
			uniqtranscale_reg_value = 0x5560B83A;
3162
			break;
5354 serge 3163
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
4104 Serge 3164
			demph_reg_value = 0x2B405555;
3165
			uniqtranscale_reg_value = 0x5598DA3A;
3166
			break;
3167
		default:
3168
			return 0;
3169
		}
3170
		break;
5354 serge 3171
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
4104 Serge 3172
		preemph_reg_value = 0x0002000;
3173
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3174
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4104 Serge 3175
			demph_reg_value = 0x2B404040;
3176
			uniqtranscale_reg_value = 0x5552B83A;
3177
			break;
5354 serge 3178
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4104 Serge 3179
			demph_reg_value = 0x2B404848;
3180
			uniqtranscale_reg_value = 0x5580B83A;
3181
			break;
5354 serge 3182
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4104 Serge 3183
			demph_reg_value = 0x2B404040;
3184
			uniqtranscale_reg_value = 0x55ADDA3A;
3185
			break;
3186
		default:
3187
			return 0;
3188
		}
3189
		break;
5354 serge 3190
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
4104 Serge 3191
		preemph_reg_value = 0x0000000;
3192
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3193
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4104 Serge 3194
			demph_reg_value = 0x2B305555;
3195
			uniqtranscale_reg_value = 0x5570B83A;
3196
			break;
5354 serge 3197
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4104 Serge 3198
			demph_reg_value = 0x2B2B4040;
3199
			uniqtranscale_reg_value = 0x55ADDA3A;
3200
			break;
3201
		default:
3202
			return 0;
3203
		}
3204
		break;
5354 serge 3205
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
4104 Serge 3206
		preemph_reg_value = 0x0006000;
3207
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3208
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4104 Serge 3209
			demph_reg_value = 0x1B405555;
3210
			uniqtranscale_reg_value = 0x55ADDA3A;
3211
			break;
3212
		default:
3213
			return 0;
3214
		}
3215
		break;
3216
	default:
3217
		return 0;
3218
	}
3219
 
6084 serge 3220
	mutex_lock(&dev_priv->sb_lock);
4560 Serge 3221
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3222
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3223
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
4104 Serge 3224
			 uniqtranscale_reg_value);
4560 Serge 3225
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3226
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3227
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3228
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
6084 serge 3229
	mutex_unlock(&dev_priv->sb_lock);
4104 Serge 3230
 
3231
	return 0;
3232
}
3233
 
6084 serge 3234
static bool chv_need_uniq_trans_scale(uint8_t train_set)
5060 serge 3235
{
6084 serge 3236
	return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3237
		(train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3238
}
3239
 
3240
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3241
{
5060 serge 3242
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3243
	struct drm_i915_private *dev_priv = dev->dev_private;
3244
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3245
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3246
	u32 deemph_reg_value, margin_reg_value, val;
3247
	uint8_t train_set = intel_dp->train_set[0];
3248
	enum dpio_channel ch = vlv_dport_to_channel(dport);
3249
	enum pipe pipe = intel_crtc->pipe;
3250
	int i;
3251
 
3252
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
5354 serge 3253
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
5060 serge 3254
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3255
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
5060 serge 3256
			deemph_reg_value = 128;
3257
			margin_reg_value = 52;
3258
			break;
5354 serge 3259
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
5060 serge 3260
			deemph_reg_value = 128;
3261
			margin_reg_value = 77;
3262
			break;
5354 serge 3263
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
5060 serge 3264
			deemph_reg_value = 128;
3265
			margin_reg_value = 102;
3266
			break;
5354 serge 3267
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
5060 serge 3268
			deemph_reg_value = 128;
3269
			margin_reg_value = 154;
3270
			/* FIXME extra to set for 1200 */
3271
			break;
3272
		default:
3273
			return 0;
3274
		}
3275
		break;
5354 serge 3276
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
5060 serge 3277
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3278
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
5060 serge 3279
			deemph_reg_value = 85;
3280
			margin_reg_value = 78;
3281
			break;
5354 serge 3282
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
5060 serge 3283
			deemph_reg_value = 85;
3284
			margin_reg_value = 116;
3285
			break;
5354 serge 3286
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
5060 serge 3287
			deemph_reg_value = 85;
3288
			margin_reg_value = 154;
3289
			break;
3290
		default:
3291
			return 0;
3292
		}
3293
		break;
5354 serge 3294
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
5060 serge 3295
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3296
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
5060 serge 3297
			deemph_reg_value = 64;
3298
			margin_reg_value = 104;
3299
			break;
5354 serge 3300
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
5060 serge 3301
			deemph_reg_value = 64;
3302
			margin_reg_value = 154;
3303
			break;
3304
		default:
3305
			return 0;
3306
		}
3307
		break;
5354 serge 3308
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
5060 serge 3309
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3310
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
5060 serge 3311
			deemph_reg_value = 43;
3312
			margin_reg_value = 154;
3313
			break;
3314
		default:
3315
			return 0;
3316
		}
3317
		break;
3318
	default:
3319
		return 0;
3320
	}
3321
 
6084 serge 3322
	mutex_lock(&dev_priv->sb_lock);
5060 serge 3323
 
3324
	/* Clear calc init */
3325
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3326
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
5354 serge 3327
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3328
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
5060 serge 3329
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3330
 
6084 serge 3331
	if (intel_crtc->config->lane_count > 2) {
3332
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3333
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3334
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3335
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3336
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3337
	}
5060 serge 3338
 
5354 serge 3339
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3340
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3341
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3342
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3343
 
6084 serge 3344
	if (intel_crtc->config->lane_count > 2) {
3345
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3346
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3347
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3348
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3349
	}
5354 serge 3350
 
5060 serge 3351
	/* Program swing deemph */
6084 serge 3352
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
5060 serge 3353
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3354
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3355
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3356
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3357
	}
3358
 
3359
	/* Program swing margin */
6084 serge 3360
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
5060 serge 3361
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
6084 serge 3362
 
5354 serge 3363
		val &= ~DPIO_SWING_MARGIN000_MASK;
3364
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
6084 serge 3365
 
3366
		/*
3367
		 * Supposedly this value shouldn't matter when unique transition
3368
		 * scale is disabled, but in fact it does matter. Let's just
3369
		 * always program the same value and hope it's OK.
3370
		 */
3371
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3372
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3373
 
5060 serge 3374
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3375
	}
3376
 
6084 serge 3377
	/*
3378
	 * The document said it needs to set bit 27 for ch0 and bit 26
3379
	 * for ch1. Might be a typo in the doc.
3380
	 * For now, for this unique transition scale selection, set bit
3381
	 * 27 for ch0 and ch1.
3382
	 */
3383
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
5060 serge 3384
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
6084 serge 3385
		if (chv_need_uniq_trans_scale(train_set))
3386
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3387
		else
3388
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
5060 serge 3389
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3390
	}
3391
 
3392
	/* Start swing calculation */
3393
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3394
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3395
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3396
 
6084 serge 3397
	if (intel_crtc->config->lane_count > 2) {
3398
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3399
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3400
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3401
	}
5060 serge 3402
 
6084 serge 3403
	mutex_unlock(&dev_priv->sb_lock);
5060 serge 3404
 
3405
	return 0;
3406
}
3407
 
2330 Serge 3408
static void
4560 Serge 3409
intel_get_adjust_train(struct intel_dp *intel_dp,
3410
		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
2330 Serge 3411
{
3412
	uint8_t v = 0;
3413
	uint8_t p = 0;
3414
	int lane;
2342 Serge 3415
	uint8_t voltage_max;
3416
	uint8_t preemph_max;
2330 Serge 3417
 
3418
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
3243 Serge 3419
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3420
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
2330 Serge 3421
 
3422
		if (this_v > v)
3423
			v = this_v;
3424
		if (this_p > p)
3425
			p = this_p;
3426
	}
3427
 
2342 Serge 3428
	voltage_max = intel_dp_voltage_max(intel_dp);
3429
	if (v >= voltage_max)
3430
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2330 Serge 3431
 
2342 Serge 3432
	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3433
	if (p >= preemph_max)
3434
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2330 Serge 3435
 
3436
	for (lane = 0; lane < 4; lane++)
3437
		intel_dp->train_set[lane] = v | p;
3438
}
3439
 
3440
static uint32_t
6084 serge 3441
gen4_signal_levels(uint8_t train_set)
2330 Serge 3442
{
3443
	uint32_t	signal_levels = 0;
3444
 
3445
	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3446
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2330 Serge 3447
	default:
3448
		signal_levels |= DP_VOLTAGE_0_4;
3449
		break;
5354 serge 3450
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2330 Serge 3451
		signal_levels |= DP_VOLTAGE_0_6;
3452
		break;
5354 serge 3453
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2330 Serge 3454
		signal_levels |= DP_VOLTAGE_0_8;
3455
		break;
5354 serge 3456
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2330 Serge 3457
		signal_levels |= DP_VOLTAGE_1_2;
3458
		break;
3459
	}
3460
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
5354 serge 3461
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
2330 Serge 3462
	default:
3463
		signal_levels |= DP_PRE_EMPHASIS_0;
3464
		break;
5354 serge 3465
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
2330 Serge 3466
		signal_levels |= DP_PRE_EMPHASIS_3_5;
3467
		break;
5354 serge 3468
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
2330 Serge 3469
		signal_levels |= DP_PRE_EMPHASIS_6;
3470
		break;
5354 serge 3471
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
2330 Serge 3472
		signal_levels |= DP_PRE_EMPHASIS_9_5;
3473
		break;
3474
	}
3475
	return signal_levels;
3476
}
3477
 
3478
/* Gen6's DP voltage swing and pre-emphasis control */
3479
static uint32_t
6084 serge 3480
gen6_edp_signal_levels(uint8_t train_set)
2330 Serge 3481
{
3482
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3483
					 DP_TRAIN_PRE_EMPHASIS_MASK);
3484
	switch (signal_levels) {
5354 serge 3485
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3486
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2330 Serge 3487
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
5354 serge 3488
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2330 Serge 3489
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
5354 serge 3490
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3491
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2330 Serge 3492
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
5354 serge 3493
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3494
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2330 Serge 3495
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
5354 serge 3496
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3497
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2330 Serge 3498
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3499
	default:
3500
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3501
			      "0x%x\n", signal_levels);
3502
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3503
	}
3504
}
3505
 
2342 Serge 3506
/* Gen7's DP voltage swing and pre-emphasis control */
3507
static uint32_t
6084 serge 3508
gen7_edp_signal_levels(uint8_t train_set)
2342 Serge 3509
{
3510
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3511
					 DP_TRAIN_PRE_EMPHASIS_MASK);
3512
	switch (signal_levels) {
5354 serge 3513
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2342 Serge 3514
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
5354 serge 3515
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2342 Serge 3516
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
5354 serge 3517
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2342 Serge 3518
		return EDP_LINK_TRAIN_400MV_6DB_IVB;
3519
 
5354 serge 3520
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2342 Serge 3521
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
5354 serge 3522
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2342 Serge 3523
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3524
 
5354 serge 3525
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2342 Serge 3526
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
5354 serge 3527
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2342 Serge 3528
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3529
 
3530
	default:
3531
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3532
			      "0x%x\n", signal_levels);
3533
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
3534
	}
3535
}
3536
 
3480 Serge 3537
/* Properly updates "DP" with the correct signal levels. */
3538
static void
3539
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3540
{
3541
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4104 Serge 3542
	enum port port = intel_dig_port->port;
3480 Serge 3543
	struct drm_device *dev = intel_dig_port->base.base.dev;
6084 serge 3544
	uint32_t signal_levels, mask = 0;
3480 Serge 3545
	uint8_t train_set = intel_dp->train_set[0];
3546
 
6084 serge 3547
	if (HAS_DDI(dev)) {
3548
		signal_levels = ddi_signal_levels(intel_dp);
3549
 
3550
		if (IS_BROXTON(dev))
3551
			signal_levels = 0;
3552
		else
3553
			mask = DDI_BUF_EMP_MASK;
5060 serge 3554
	} else if (IS_CHERRYVIEW(dev)) {
6084 serge 3555
		signal_levels = chv_signal_levels(intel_dp);
4104 Serge 3556
	} else if (IS_VALLEYVIEW(dev)) {
6084 serge 3557
		signal_levels = vlv_signal_levels(intel_dp);
4104 Serge 3558
	} else if (IS_GEN7(dev) && port == PORT_A) {
6084 serge 3559
		signal_levels = gen7_edp_signal_levels(train_set);
3480 Serge 3560
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
4104 Serge 3561
	} else if (IS_GEN6(dev) && port == PORT_A) {
6084 serge 3562
		signal_levels = gen6_edp_signal_levels(train_set);
3480 Serge 3563
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3564
	} else {
6084 serge 3565
		signal_levels = gen4_signal_levels(train_set);
3480 Serge 3566
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3567
	}
3568
 
6084 serge 3569
	if (mask)
3570
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3480 Serge 3571
 
6084 serge 3572
	DRM_DEBUG_KMS("Using vswing level %d\n",
3573
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3574
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3575
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3576
			DP_TRAIN_PRE_EMPHASIS_SHIFT);
3577
 
3480 Serge 3578
	*DP = (*DP & ~mask) | signal_levels;
3579
}
3580
 
2330 Serge 3581
static bool
3582
intel_dp_set_link_train(struct intel_dp *intel_dp,
4560 Serge 3583
			uint32_t *DP,
2330 Serge 3584
			uint8_t dp_train_pat)
3585
{
3243 Serge 3586
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
6084 serge 3587
	struct drm_i915_private *dev_priv =
3588
		to_i915(intel_dig_port->base.base.dev);
4560 Serge 3589
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
3590
	int ret, len;
2330 Serge 3591
 
5354 serge 3592
	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3243 Serge 3593
 
4560 Serge 3594
	I915_WRITE(intel_dp->output_reg, *DP);
2330 Serge 3595
	POSTING_READ(intel_dp->output_reg);
3596
 
4560 Serge 3597
	buf[0] = dp_train_pat;
3598
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3599
	    DP_TRAINING_PATTERN_DISABLE) {
3600
		/* don't write DP_TRAINING_LANEx_SET on disable */
3601
		len = 1;
3602
	} else {
3603
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3604
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3605
		len = intel_dp->lane_count + 1;
3606
	}
2330 Serge 3607
 
5060 serge 3608
	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
6084 serge 3609
				buf, len);
4560 Serge 3610
 
3611
	return ret == len;
3612
}
3613
 
3614
static bool
3615
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3616
			uint8_t dp_train_pat)
3617
{
6084 serge 3618
	if (!intel_dp->train_set_valid)
3619
		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
4560 Serge 3620
	intel_dp_set_signal_levels(intel_dp, DP);
3621
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3622
}
3623
 
3624
static bool
3625
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3626
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
3627
{
3628
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
6084 serge 3629
	struct drm_i915_private *dev_priv =
3630
		to_i915(intel_dig_port->base.base.dev);
4560 Serge 3631
	int ret;
3632
 
3633
	intel_get_adjust_train(intel_dp, link_status);
3634
	intel_dp_set_signal_levels(intel_dp, DP);
3635
 
3636
	I915_WRITE(intel_dp->output_reg, *DP);
3637
	POSTING_READ(intel_dp->output_reg);
3638
 
5060 serge 3639
	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3640
				intel_dp->train_set, intel_dp->lane_count);
2330 Serge 3641
 
4560 Serge 3642
	return ret == intel_dp->lane_count;
2330 Serge 3643
}
3644
 
3746 Serge 3645
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3646
{
3647
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3648
	struct drm_device *dev = intel_dig_port->base.base.dev;
3649
	struct drm_i915_private *dev_priv = dev->dev_private;
3650
	enum port port = intel_dig_port->port;
3651
	uint32_t val;
3652
 
3653
	if (!HAS_DDI(dev))
3654
		return;
3655
 
3656
	val = I915_READ(DP_TP_CTL(port));
3657
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3658
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3659
	I915_WRITE(DP_TP_CTL(port), val);
3660
 
3661
	/*
3662
	 * On PORT_A we can have only eDP in SST mode. There the only reason
3663
	 * we need to set idle transmission mode is to work around a HW issue
3664
	 * where we enable the pipe while not in idle link-training mode.
3665
	 * In this case there is requirement to wait for a minimum number of
3666
	 * idle patterns to be sent.
3667
	 */
3668
	if (port == PORT_A)
3669
		return;
3670
 
3671
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3672
		     1))
3673
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3674
}
3675
 
2330 Serge 3676
/* Enable corresponding port and start training pattern 1 */
6084 serge 3677
static void
3678
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
2330 Serge 3679
{
3243 Serge 3680
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3681
	struct drm_device *dev = encoder->dev;
2330 Serge 3682
	int i;
3683
	uint8_t voltage;
2342 Serge 3684
	int voltage_tries, loop_tries;
2330 Serge 3685
	uint32_t DP = intel_dp->DP;
4560 Serge 3686
	uint8_t link_config[2];
6084 serge 3687
	uint8_t link_bw, rate_select;
2330 Serge 3688
 
3480 Serge 3689
	if (HAS_DDI(dev))
3243 Serge 3690
		intel_ddi_prepare_link_retrain(encoder);
3691
 
6084 serge 3692
	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3693
			      &link_bw, &rate_select);
3694
 
2330 Serge 3695
	/* Write the link configuration data */
6084 serge 3696
	link_config[0] = link_bw;
4560 Serge 3697
	link_config[1] = intel_dp->lane_count;
3698
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3699
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
5060 serge 3700
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
6084 serge 3701
	if (intel_dp->num_sink_rates)
3702
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3703
				  &rate_select, 1);
2330 Serge 3704
 
4560 Serge 3705
	link_config[0] = 0;
3706
	link_config[1] = DP_SET_ANSI_8B10B;
5060 serge 3707
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
4560 Serge 3708
 
2330 Serge 3709
	DP |= DP_PORT_EN;
2342 Serge 3710
 
4560 Serge 3711
	/* clock recovery */
3712
	if (!intel_dp_reset_link_train(intel_dp, &DP,
3713
				       DP_TRAINING_PATTERN_1 |
3714
				       DP_LINK_SCRAMBLING_DISABLE)) {
3715
		DRM_ERROR("failed to enable link training\n");
3716
		return;
3717
	}
3718
 
2330 Serge 3719
	voltage = 0xff;
2342 Serge 3720
	voltage_tries = 0;
3721
	loop_tries = 0;
2330 Serge 3722
	for (;;) {
6084 serge 3723
		uint8_t link_status[DP_LINK_STATUS_SIZE];
2342 Serge 3724
 
3243 Serge 3725
		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
2342 Serge 3726
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3727
			DRM_ERROR("failed to get link status\n");
2330 Serge 3728
			break;
2342 Serge 3729
		}
2330 Serge 3730
 
3243 Serge 3731
		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2342 Serge 3732
			DRM_DEBUG_KMS("clock recovery OK\n");
2330 Serge 3733
			break;
3734
		}
3735
 
6084 serge 3736
		/*
3737
		 * if we used previously trained voltage and pre-emphasis values
3738
		 * and we don't get clock recovery, reset link training values
3739
		 */
3740
		if (intel_dp->train_set_valid) {
3741
			DRM_DEBUG_KMS("clock recovery not ok, reset");
3742
			/* clear the flag as we are not reusing train set */
3743
			intel_dp->train_set_valid = false;
3744
			if (!intel_dp_reset_link_train(intel_dp, &DP,
3745
						       DP_TRAINING_PATTERN_1 |
3746
						       DP_LINK_SCRAMBLING_DISABLE)) {
3747
				DRM_ERROR("failed to enable link training\n");
3748
				return;
3749
			}
3750
			continue;
3751
		}
3752
 
2330 Serge 3753
		/* Check to see if we've tried the max voltage */
3754
		for (i = 0; i < intel_dp->lane_count; i++)
3755
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3756
				break;
3480 Serge 3757
		if (i == intel_dp->lane_count) {
2342 Serge 3758
			++loop_tries;
3759
			if (loop_tries == 5) {
4560 Serge 3760
				DRM_ERROR("too many full retries, give up\n");
6084 serge 3761
				break;
2342 Serge 3762
			}
4560 Serge 3763
			intel_dp_reset_link_train(intel_dp, &DP,
3764
						  DP_TRAINING_PATTERN_1 |
3765
						  DP_LINK_SCRAMBLING_DISABLE);
2342 Serge 3766
			voltage_tries = 0;
3767
			continue;
3768
		}
2330 Serge 3769
 
3770
		/* Check to see if we've tried the same voltage 5 times */
3771
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
2342 Serge 3772
			++voltage_tries;
3773
			if (voltage_tries == 5) {
4560 Serge 3774
				DRM_ERROR("too many voltage retries, give up\n");
2330 Serge 3775
				break;
2342 Serge 3776
			}
2330 Serge 3777
		} else
2342 Serge 3778
			voltage_tries = 0;
2330 Serge 3779
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3780
 
4560 Serge 3781
		/* Update training set as requested by target */
3782
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3783
			DRM_ERROR("failed to update link training\n");
3784
			break;
3785
		}
2330 Serge 3786
	}
3787
 
3788
	intel_dp->DP = DP;
3789
}
3790
 
6084 serge 3791
static void
3792
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
2330 Serge 3793
{
6084 serge 3794
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3795
	struct drm_device *dev = dig_port->base.base.dev;
2330 Serge 3796
	bool channel_eq = false;
3797
	int tries, cr_tries;
3798
	uint32_t DP = intel_dp->DP;
5060 serge 3799
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
2330 Serge 3800
 
6084 serge 3801
	/*
3802
	 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
3803
	 *
3804
	 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
3805
	 * also mandatory for downstream devices that support HBR2.
3806
	 *
3807
	 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
3808
	 * supported but still not enabled.
3809
	 */
3810
	if (intel_dp_source_supports_hbr2(dev) &&
3811
	    drm_dp_tps3_supported(intel_dp->dpcd))
5060 serge 3812
		training_pattern = DP_TRAINING_PATTERN_3;
6084 serge 3813
	else if (intel_dp->link_rate == 540000)
3814
		DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
5060 serge 3815
 
2330 Serge 3816
	/* channel equalization */
4560 Serge 3817
	if (!intel_dp_set_link_train(intel_dp, &DP,
5060 serge 3818
				     training_pattern |
4560 Serge 3819
				     DP_LINK_SCRAMBLING_DISABLE)) {
3820
		DRM_ERROR("failed to start channel equalization\n");
3821
		return;
3822
	}
3823
 
2330 Serge 3824
	tries = 0;
3825
	cr_tries = 0;
3826
	channel_eq = false;
3827
	for (;;) {
6084 serge 3828
		uint8_t link_status[DP_LINK_STATUS_SIZE];
2330 Serge 3829
 
3830
		if (cr_tries > 5) {
3831
			DRM_ERROR("failed to train DP, aborting\n");
3832
			break;
3833
		}
3834
 
3243 Serge 3835
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
4560 Serge 3836
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3837
			DRM_ERROR("failed to get link status\n");
2330 Serge 3838
			break;
4560 Serge 3839
		}
2330 Serge 3840
 
3841
		/* Make sure clock is still ok */
6084 serge 3842
		if (!drm_dp_clock_recovery_ok(link_status,
3843
					      intel_dp->lane_count)) {
3844
			intel_dp->train_set_valid = false;
3845
			intel_dp_link_training_clock_recovery(intel_dp);
4560 Serge 3846
			intel_dp_set_link_train(intel_dp, &DP,
5060 serge 3847
						training_pattern |
4560 Serge 3848
						DP_LINK_SCRAMBLING_DISABLE);
2330 Serge 3849
			cr_tries++;
3850
			continue;
3851
		}
3852
 
6084 serge 3853
		if (drm_dp_channel_eq_ok(link_status,
3854
					 intel_dp->lane_count)) {
2330 Serge 3855
			channel_eq = true;
3856
			break;
3857
		}
3858
 
3859
		/* Try 5 times, then try clock recovery if that fails */
3860
		if (tries > 5) {
6084 serge 3861
			intel_dp->train_set_valid = false;
3862
			intel_dp_link_training_clock_recovery(intel_dp);
4560 Serge 3863
			intel_dp_set_link_train(intel_dp, &DP,
5060 serge 3864
						training_pattern |
4560 Serge 3865
						DP_LINK_SCRAMBLING_DISABLE);
2330 Serge 3866
			tries = 0;
3867
			cr_tries++;
3868
			continue;
3869
		}
3870
 
4560 Serge 3871
		/* Update training set as requested by target */
3872
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3873
			DRM_ERROR("failed to update link training\n");
3874
			break;
3875
		}
2330 Serge 3876
		++tries;
3877
	}
3878
 
3746 Serge 3879
	intel_dp_set_idle_link_train(intel_dp);
3880
 
3881
	intel_dp->DP = DP;
3882
 
6084 serge 3883
	if (channel_eq) {
3884
		intel_dp->train_set_valid = true;
3746 Serge 3885
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
6084 serge 3886
	}
2330 Serge 3887
}
3888
 
3746 Serge 3889
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3890
{
4560 Serge 3891
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3746 Serge 3892
				DP_TRAINING_PATTERN_DISABLE);
3893
}
3894
 
6084 serge 3895
/* Full link-training sequence: clock recovery followed by channel EQ. */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	intel_dp_link_training_clock_recovery(intel_dp);
	intel_dp_link_training_channel_equalization(intel_dp);
}
3901
 
2330 Serge 3902
static void
3903
intel_dp_link_down(struct intel_dp *intel_dp)
3904
{
3243 Serge 3905
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
6084 serge 3906
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
4104 Serge 3907
	enum port port = intel_dig_port->port;
3243 Serge 3908
	struct drm_device *dev = intel_dig_port->base.base.dev;
2330 Serge 3909
	struct drm_i915_private *dev_priv = dev->dev_private;
3910
	uint32_t DP = intel_dp->DP;
3911
 
5060 serge 3912
	if (WARN_ON(HAS_DDI(dev)))
3243 Serge 3913
		return;
3914
 
3031 serge 3915
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
2330 Serge 3916
		return;
3917
 
3918
	DRM_DEBUG_KMS("\n");
3919
 
6084 serge 3920
	if ((IS_GEN7(dev) && port == PORT_A) ||
3921
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
2330 Serge 3922
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
6084 serge 3923
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
2330 Serge 3924
	} else {
5354 serge 3925
		if (IS_CHERRYVIEW(dev))
3926
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
3927
		else
6084 serge 3928
			DP &= ~DP_LINK_TRAIN_MASK;
3929
		DP |= DP_LINK_TRAIN_PAT_IDLE;
2330 Serge 3930
	}
6084 serge 3931
	I915_WRITE(intel_dp->output_reg, DP);
2330 Serge 3932
	POSTING_READ(intel_dp->output_reg);
3933
 
6084 serge 3934
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3935
	I915_WRITE(intel_dp->output_reg, DP);
3936
	POSTING_READ(intel_dp->output_reg);
2330 Serge 3937
 
6084 serge 3938
	/*
3939
	 * HW workaround for IBX, we need to move the port
3940
	 * to transcoder A after disabling it to allow the
3941
	 * matching HDMI port to be enabled on transcoder A.
3942
	 */
3943
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3944
		/* always enable with pattern 1 (as per spec) */
3945
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3946
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
2330 Serge 3947
		I915_WRITE(intel_dp->output_reg, DP);
6084 serge 3948
		POSTING_READ(intel_dp->output_reg);
2330 Serge 3949
 
6084 serge 3950
		DP &= ~DP_PORT_EN;
3951
		I915_WRITE(intel_dp->output_reg, DP);
3952
		POSTING_READ(intel_dp->output_reg);
2330 Serge 3953
	}
3954
 
2342 Serge 3955
	msleep(intel_dp->panel_power_down_delay);
2330 Serge 3956
}
3957
 
3958
static bool
3959
intel_dp_get_dpcd(struct intel_dp *intel_dp)
3960
{
4560 Serge 3961
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3962
	struct drm_device *dev = dig_port->base.base.dev;
3963
	struct drm_i915_private *dev_priv = dev->dev_private;
6084 serge 3964
	uint8_t rev;
4560 Serge 3965
 
5060 serge 3966
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3967
				    sizeof(intel_dp->dpcd)) < 0)
3031 serge 3968
		return false; /* aux transfer failed */
3969
 
5354 serge 3970
	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3480 Serge 3971
 
3031 serge 3972
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3973
		return false; /* DPCD not present */
3974
 
4104 Serge 3975
	/* Check if the panel supports PSR */
3976
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
4560 Serge 3977
	if (is_edp(intel_dp)) {
5060 serge 3978
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
6084 serge 3979
					intel_dp->psr_dpcd,
3980
					sizeof(intel_dp->psr_dpcd));
4560 Serge 3981
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3982
			dev_priv->psr.sink_support = true;
6084 serge 3983
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
4560 Serge 3984
		}
6084 serge 3985
 
3986
		if (INTEL_INFO(dev)->gen >= 9 &&
3987
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3988
			uint8_t frame_sync_cap;
3989
 
3990
			dev_priv->psr.sink_support = true;
3991
			intel_dp_dpcd_read_wake(&intel_dp->aux,
3992
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3993
					&frame_sync_cap, 1);
3994
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3995
			/* PSR2 needs frame sync as well */
3996
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3997
			DRM_DEBUG_KMS("PSR2 %s on sink",
3998
				dev_priv->psr.psr2_support ? "supported" : "not supported");
3999
		}
4560 Serge 4000
	}
4001
 
6084 serge 4002
	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
4003
		      yesno(intel_dp_source_supports_hbr2(dev)),
4004
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
5060 serge 4005
 
6084 serge 4006
	/* Intermediate frequency support */
4007
	if (is_edp(intel_dp) &&
4008
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4009
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
4010
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
4011
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4012
		int i;
4013
 
4014
		intel_dp_dpcd_read_wake(&intel_dp->aux,
4015
				DP_SUPPORTED_LINK_RATES,
4016
				sink_rates,
4017
				sizeof(sink_rates));
4018
 
4019
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4020
			int val = le16_to_cpu(sink_rates[i]);
4021
 
4022
			if (val == 0)
4023
				break;
4024
 
4025
			/* Value read is in kHz while drm clock is saved in deca-kHz */
4026
			intel_dp->sink_rates[i] = (val * 200) / 10;
4027
		}
4028
		intel_dp->num_sink_rates = i;
4029
	}
4030
 
4031
	intel_dp_print_rates(intel_dp);
4032
 
3031 serge 4033
	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4034
	      DP_DWN_STRM_PORT_PRESENT))
4035
		return true; /* native DP sink */
4036
 
4037
	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4038
		return true; /* no per-port downstream info */
4039
 
5060 serge 4040
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
6084 serge 4041
				    intel_dp->downstream_ports,
5060 serge 4042
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
3031 serge 4043
		return false; /* downstream port status fetch failed */
4044
 
6084 serge 4045
	return true;
3031 serge 4046
}
2330 Serge 4047
 
3031 serge 4048
static void
4049
intel_dp_probe_oui(struct intel_dp *intel_dp)
4050
{
4051
	u8 buf[3];
4052
 
4053
	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4054
		return;
4055
 
5060 serge 4056
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3031 serge 4057
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4058
			      buf[0], buf[1], buf[2]);
4059
 
5060 serge 4060
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3031 serge 4061
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4062
			      buf[0], buf[1], buf[2]);
2330 Serge 4063
}
4064
 
2342 Serge 4065
static bool
5060 serge 4066
intel_dp_probe_mst(struct intel_dp *intel_dp)
4067
{
4068
	u8 buf[1];
4069
 
4070
	if (!intel_dp->can_mst)
4071
		return false;
4072
 
4073
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4074
		return false;
4075
 
4076
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4077
		if (buf[0] & DP_MST_CAP) {
4078
			DRM_DEBUG_KMS("Sink is MST capable\n");
4079
			intel_dp->is_mst = true;
4080
		} else {
4081
			DRM_DEBUG_KMS("Sink is not MST capable\n");
4082
			intel_dp->is_mst = false;
4083
		}
4084
	}
4085
 
4086
	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4087
	return intel_dp->is_mst;
4088
}
4089
 
6084 serge 4090
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
5060 serge 4091
{
6084 serge 4092
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4093
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
5354 serge 4094
	u8 buf;
6084 serge 4095
	int ret = 0;
5060 serge 4096
 
6084 serge 4097
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4098
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4099
		ret = -EIO;
4100
		goto out;
4101
	}
4102
 
4103
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4104
			       buf & ~DP_TEST_SINK_START) < 0) {
4105
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4106
		ret = -EIO;
4107
		goto out;
4108
	}
4109
 
4110
	intel_dp->sink_crc.started = false;
4111
 out:
4112
	hsw_enable_ips(intel_crtc);
4113
	return ret;
4114
}
4115
 
4116
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4117
{
4118
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4119
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4120
	u8 buf;
4121
	int ret;
4122
 
4123
	if (intel_dp->sink_crc.started) {
4124
		ret = intel_dp_sink_crc_stop(intel_dp);
4125
		if (ret)
4126
			return ret;
4127
	}
4128
 
5354 serge 4129
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4130
		return -EIO;
5060 serge 4131
 
5354 serge 4132
	if (!(buf & DP_TEST_CRC_SUPPORTED))
5060 serge 4133
		return -ENOTTY;
4134
 
6084 serge 4135
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4136
 
5354 serge 4137
	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4138
		return -EIO;
4139
 
6084 serge 4140
	hsw_disable_ips(intel_crtc);
4141
 
5060 serge 4142
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
6084 serge 4143
			       buf | DP_TEST_SINK_START) < 0) {
4144
		hsw_enable_ips(intel_crtc);
5354 serge 4145
		return -EIO;
6084 serge 4146
	}
5060 serge 4147
 
6084 serge 4148
	intel_dp->sink_crc.started = true;
4149
	return 0;
4150
}
5354 serge 4151
 
6084 serge 4152
/*
 * Fetch a fresh 6-byte CRC from the sink's test-sink hardware, retrying for
 * up to 6 vblanks until the CRC counter advances and the value changes.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;
	bool old_equal_new;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

		/*
		 * Count might be reset during the loop. In this case
		 * last known count needs to be reset as well.
		 */
		if (count == 0)
			intel_dp->sink_crc.last_count = 0;

		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
			ret = -EIO;
			goto stop;
		}

		/* Stale result: same counter and identical CRC bytes. */
		old_equal_new = (count == intel_dp->sink_crc.last_count &&
				 !memcmp(intel_dp->sink_crc.last_crc, crc,
					 6 * sizeof(u8)));

	} while (--attempts && (count == 0 || old_equal_new));

	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));

	if (attempts == 0) {
		if (old_equal_new) {
			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
		} else {
			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
			ret = -ETIMEDOUT;
			goto stop;
		}
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4211
 
4212
static bool
2342 Serge 4213
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4214
{
5060 serge 4215
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
6084 serge 4216
				       DP_DEVICE_SERVICE_IRQ_VECTOR,
5060 serge 4217
				       sink_irq_vector, 1) == 1;
4218
}
4219
 
4220
static bool
4221
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4222
{
2342 Serge 4223
	int ret;
4224
 
5060 serge 4225
	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4226
					     DP_SINK_COUNT_ESI,
4227
					     sink_irq_vector, 14);
4228
	if (ret != 14)
2342 Serge 4229
		return false;
4230
 
4231
	return true;
4232
}
4233
 
6084 serge 4234
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
2342 Serge 4235
{
6084 serge 4236
	uint8_t test_result = DP_TEST_ACK;
4237
	return test_result;
2342 Serge 4238
}
4239
 
6084 serge 4240
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4241
{
4242
	uint8_t test_result = DP_TEST_NAK;
4243
	return test_result;
4244
}
4245
 
4246
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4247
{
4248
	uint8_t test_result = DP_TEST_NAK;
4249
	struct intel_connector *intel_connector = intel_dp->attached_connector;
4250
	struct drm_connector *connector = &intel_connector->base;
4251
 
4252
	if (intel_connector->detect_edid == NULL ||
4253
	    connector->edid_corrupt ||
4254
	    intel_dp->aux.i2c_defer_count > 6) {
4255
		/* Check EDID read for NACKs, DEFERs and corruption
4256
		 * (DP CTS 1.2 Core r1.1)
4257
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
4258
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
4259
		 *    4.2.2.6 : EDID corruption detected
4260
		 * Use failsafe mode for all cases
4261
		 */
4262
		if (intel_dp->aux.i2c_nack_count > 0 ||
4263
			intel_dp->aux.i2c_defer_count > 0)
4264
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4265
				      intel_dp->aux.i2c_nack_count,
4266
				      intel_dp->aux.i2c_defer_count);
4267
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4268
	} else {
4269
		struct edid *block = intel_connector->detect_edid;
4270
 
4271
		/* We have to write the checksum
4272
		 * of the last block read
4273
		 */
4274
		block += intel_connector->detect_edid->extensions;
4275
 
4276
		if (!drm_dp_dpcd_write(&intel_dp->aux,
4277
					DP_TEST_EDID_CHECKSUM,
4278
					&block->checksum,
4279
					1))
4280
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4281
 
4282
		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4283
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4284
	}
4285
 
4286
	/* Set test active flag here so userspace doesn't interrupt things */
4287
	intel_dp->compliance_test_active = 1;
4288
 
4289
	return test_result;
4290
}
4291
 
4292
static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4293
{
4294
	uint8_t test_result = DP_TEST_NAK;
4295
	return test_result;
4296
}
4297
 
4298
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4299
{
4300
	uint8_t response = DP_TEST_NAK;
4301
	uint8_t rxdata = 0;
4302
	int status = 0;
4303
 
4304
	intel_dp->compliance_test_active = 0;
4305
	intel_dp->compliance_test_type = 0;
4306
	intel_dp->compliance_test_data = 0;
4307
 
4308
	intel_dp->aux.i2c_nack_count = 0;
4309
	intel_dp->aux.i2c_defer_count = 0;
4310
 
4311
	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4312
	if (status <= 0) {
4313
		DRM_DEBUG_KMS("Could not read test request from sink\n");
4314
		goto update_status;
4315
	}
4316
 
4317
	switch (rxdata) {
4318
	case DP_TEST_LINK_TRAINING:
4319
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4320
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4321
		response = intel_dp_autotest_link_training(intel_dp);
4322
		break;
4323
	case DP_TEST_LINK_VIDEO_PATTERN:
4324
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4325
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4326
		response = intel_dp_autotest_video_pattern(intel_dp);
4327
		break;
4328
	case DP_TEST_LINK_EDID_READ:
4329
		DRM_DEBUG_KMS("EDID test requested\n");
4330
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4331
		response = intel_dp_autotest_edid(intel_dp);
4332
		break;
4333
	case DP_TEST_LINK_PHY_TEST_PATTERN:
4334
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4335
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4336
		response = intel_dp_autotest_phy_pattern(intel_dp);
4337
		break;
4338
	default:
4339
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4340
		break;
4341
	}
4342
 
4343
update_status:
4344
	status = drm_dp_dpcd_write(&intel_dp->aux,
4345
				   DP_TEST_RESPONSE,
4346
				   &response, 1);
4347
	if (status <= 0)
4348
		DRM_DEBUG_KMS("Could not write test response to sink\n");
4349
}
4350
 
5060 serge 4351
/*
 * Process MST sink events: read the ESI (event status indicator) block,
 * retrain the link if channel EQ was lost, and hand any sideband IRQs to
 * the MST topology manager.  Loops while new ESI events keep arriving.
 *
 * Returns the topology manager's result, 0 if nothing was handled, or
 * -EINVAL if the device is not in MST mode / the ESI read failed.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events back to the sink;
				 * retry the 3-byte write a few times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			/* Drop out of MST mode and notify userspace. */
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4407
 
2330 Serge 4408
/*
4409
 * According to DP spec
4410
 * 5.1.2:
4411
 *  1. Read DPCD
4412
 *  2. Configure link according to Receiver Capabilities
4413
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
4414
 *  4. Check link status on receipt of hot-plug interrupt
4415
 */
6084 serge 4416
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Caller must hold the connection_mutex (we may retrain). */
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/* Nothing to check if the encoder isn't driving an active crtc. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrain if channel equalization has been lost. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4463
 
3031 serge 4464
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the DPCD alone: direct sinks are
 * "connected"; for branch devices consult SINK_COUNT (if HPD-capable),
 * then probe DDC, then fall back on the downstream port type.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD < 1.1: only the coarse downstream-port type exists. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4513
 
4514
static enum drm_connector_status
5354 serge 4515
edp_detect(struct intel_dp *intel_dp)
2330 Serge 4516
{
3243 Serge 4517
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2330 Serge 4518
	enum drm_connector_status status;
4519
 
6084 serge 4520
	status = intel_panel_detect(dev);
4521
	if (status == connector_status_unknown)
4522
		status = connector_status_connected;
5354 serge 4523
 
6084 serge 4524
	return status;
5354 serge 4525
}
2330 Serge 4526
 
6084 serge 4527
static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4528
				       struct intel_digital_port *port)
5354 serge 4529
{
6084 serge 4530
	u32 bit;
5354 serge 4531
 
6084 serge 4532
	switch (port->port) {
4533
	case PORT_A:
4534
		return true;
4535
	case PORT_B:
4536
		bit = SDE_PORTB_HOTPLUG;
4537
		break;
4538
	case PORT_C:
4539
		bit = SDE_PORTC_HOTPLUG;
4540
		break;
4541
	case PORT_D:
4542
		bit = SDE_PORTD_HOTPLUG;
4543
		break;
4544
	default:
4545
		MISSING_CASE(port->port);
4546
		return false;
4547
	}
3480 Serge 4548
 
6084 serge 4549
	return I915_READ(SDEISR) & bit;
2330 Serge 4550
}
4551
 
6084 serge 4552
static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4553
				       struct intel_digital_port *port)
2330 Serge 4554
{
6084 serge 4555
	u32 bit;
2330 Serge 4556
 
6084 serge 4557
	switch (port->port) {
4558
	case PORT_A:
4559
		return true;
3480 Serge 4560
	case PORT_B:
6084 serge 4561
		bit = SDE_PORTB_HOTPLUG_CPT;
2330 Serge 4562
		break;
3480 Serge 4563
	case PORT_C:
6084 serge 4564
		bit = SDE_PORTC_HOTPLUG_CPT;
2330 Serge 4565
		break;
3480 Serge 4566
	case PORT_D:
6084 serge 4567
		bit = SDE_PORTD_HOTPLUG_CPT;
2330 Serge 4568
		break;
6084 serge 4569
	case PORT_E:
4570
		bit = SDE_PORTE_HOTPLUG_SPT;
4571
		break;
2330 Serge 4572
	default:
6084 serge 4573
		MISSING_CASE(port->port);
4574
		return false;
2330 Serge 4575
	}
6084 serge 4576
 
4577
	return I915_READ(SDEISR) & bit;
4578
}
4579
 
4580
static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4581
				       struct intel_digital_port *port)
4582
{
4583
	u32 bit;
4584
 
4585
	switch (port->port) {
4586
	case PORT_B:
4587
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4588
		break;
4589
	case PORT_C:
4590
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4591
		break;
4592
	case PORT_D:
4593
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4594
		break;
4595
	default:
4596
		MISSING_CASE(port->port);
4597
		return false;
4560 Serge 4598
	}
2330 Serge 4599
 
6084 serge 4600
	return I915_READ(PORT_HOTPLUG_STAT) & bit;
5097 serge 4601
}
4602
 
6084 serge 4603
static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4604
				       struct intel_digital_port *port)
4605
{
4606
	u32 bit;
4607
 
4608
	switch (port->port) {
4609
	case PORT_B:
4610
		bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4611
		break;
4612
	case PORT_C:
4613
		bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4614
		break;
4615
	case PORT_D:
4616
		bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4617
		break;
4618
	default:
4619
		MISSING_CASE(port->port);
4620
		return false;
4621
	}
4622
 
4623
	return I915_READ(PORT_HOTPLUG_STAT) & bit;
4624
}
4625
 
4626
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4627
				       struct intel_digital_port *intel_dig_port)
4628
{
4629
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4630
	enum port port;
4631
    u32 bit = 0;
4632
 
4633
 
4634
	return I915_READ(GEN8_DE_PORT_ISR) & bit;
4635
}
4636
 
4637
/*
4638
 * intel_digital_port_connected - is the specified port connected?
4639
 * @dev_priv: i915 private structure
4640
 * @port: the port to test
4641
 *
4642
 * Return %true if @port is connected, %false otherwise.
4643
 */
4644
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4645
					 struct intel_digital_port *port)
4646
{
4647
	if (HAS_PCH_IBX(dev_priv))
4648
		return ibx_digital_port_connected(dev_priv, port);
4649
	if (HAS_PCH_SPLIT(dev_priv))
4650
		return cpt_digital_port_connected(dev_priv, port);
4651
	else if (IS_BROXTON(dev_priv))
4652
		return bxt_digital_port_connected(dev_priv, port);
4653
	else if (IS_VALLEYVIEW(dev_priv))
4654
		return vlv_digital_port_connected(dev_priv, port);
4655
	else
4656
		return g4x_digital_port_connected(dev_priv, port);
4657
}
4658
 
5097 serge 4659
static enum drm_connector_status
6084 serge 4660
ironlake_dp_detect(struct intel_dp *intel_dp)
4661
{
4662
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4663
	struct drm_i915_private *dev_priv = dev->dev_private;
4664
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4665
 
4666
	if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4667
		return connector_status_disconnected;
4668
 
4669
	return intel_dp_detect_dpcd(intel_dp);
4670
}
4671
 
4672
static enum drm_connector_status
5097 serge 4673
g4x_dp_detect(struct intel_dp *intel_dp)
4674
{
4675
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4676
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4677
 
4678
	/* Can't disconnect eDP, but you can close the lid... */
4679
	if (is_edp(intel_dp)) {
4680
		enum drm_connector_status status;
4681
 
4682
		status = intel_panel_detect(dev);
4683
		if (status == connector_status_unknown)
4684
			status = connector_status_connected;
4685
		return status;
4686
	}
4687
 
6084 serge 4688
	if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
2330 Serge 4689
		return connector_status_disconnected;
4690
 
4691
	return intel_dp_detect_dpcd(intel_dp);
4692
}
4693
 
2342 Serge 4694
static struct edid *
5354 serge 4695
intel_dp_get_edid(struct intel_dp *intel_dp)
2342 Serge 4696
{
5354 serge 4697
	struct intel_connector *intel_connector = intel_dp->attached_connector;
3243 Serge 4698
 
4699
	/* use cached edid if we have one */
4700
	if (intel_connector->edid) {
4701
		/* invalid edid */
4702
		if (IS_ERR(intel_connector->edid))
3031 serge 4703
			return NULL;
4704
 
4560 Serge 4705
		return drm_edid_duplicate(intel_connector->edid);
5354 serge 4706
	} else
4707
		return drm_get_edid(&intel_connector->base,
4708
				    &intel_dp->aux.ddc);
4709
}
3031 serge 4710
 
5354 serge 4711
static void
4712
intel_dp_set_edid(struct intel_dp *intel_dp)
4713
{
4714
	struct intel_connector *intel_connector = intel_dp->attached_connector;
4715
	struct edid *edid;
4716
 
4717
	edid = intel_dp_get_edid(intel_dp);
4718
	intel_connector->detect_edid = edid;
4719
 
4720
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4721
		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4722
	else
4723
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
2342 Serge 4724
}
4725
 
5354 serge 4726
static void
4727
intel_dp_unset_edid(struct intel_dp *intel_dp)
2342 Serge 4728
{
5354 serge 4729
	struct intel_connector *intel_connector = intel_dp->attached_connector;
2342 Serge 4730
 
5354 serge 4731
	kfree(intel_connector->detect_edid);
4732
	intel_connector->detect_edid = NULL;
3243 Serge 4733
 
5354 serge 4734
	intel_dp->has_audio = false;
4735
}
3031 serge 4736
 
2330 Serge 4737
/*
 * Connector ->detect() hook: determine whether a sink is present, cache
 * its EDID, probe OUI/MST capability, and service any pending automated
 * test request.  AUX power is held across all sink accesses.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* Keep AUX powered for the DPCD/EDID traffic below. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4809
 
5354 serge 4810
/*
 * Connector ->force() hook: re-read the EDID for a connector whose status
 * is being forced, holding AUX power across the DDC access.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	/* Only bother re-reading EDID when forced to "connected". */
	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4835
 
4836
static int intel_dp_get_modes(struct drm_connector *connector)
4837
{
4838
	struct intel_connector *intel_connector = to_intel_connector(connector);
4839
	struct edid *edid;
4840
 
4841
	edid = intel_connector->detect_edid;
4842
	if (edid) {
4843
		int ret = intel_connector_update_modes(connector, edid);
6084 serge 4844
		if (ret)
4845
			return ret;
5354 serge 4846
	}
2330 Serge 4847
 
3243 Serge 4848
	/* if eDP has no EDID, fall back to fixed mode */
5354 serge 4849
	if (is_edp(intel_attached_dp(connector)) &&
4850
	    intel_connector->panel.fixed_mode) {
6084 serge 4851
		struct drm_display_mode *mode;
5354 serge 4852
 
4853
		mode = drm_mode_duplicate(connector->dev,
3243 Serge 4854
					  intel_connector->panel.fixed_mode);
4855
		if (mode) {
2330 Serge 4856
			drm_mode_probed_add(connector, mode);
4857
			return 1;
4858
		}
4859
	}
5354 serge 4860
 
2330 Serge 4861
	return 0;
4862
}
4863
 
3243 Serge 4864
static bool
4865
intel_dp_detect_audio(struct drm_connector *connector)
4866
{
5354 serge 4867
	bool has_audio = false;
3243 Serge 4868
	struct edid *edid;
2330 Serge 4869
 
5354 serge 4870
	edid = to_intel_connector(connector)->detect_edid;
4871
	if (edid)
3243 Serge 4872
		has_audio = drm_detect_monitor_audio(edid);
2330 Serge 4873
 
3243 Serge 4874
	return has_audio;
4875
}
2330 Serge 4876
 
4877
/*
 * Connector ->set_property() hook.  Handles force-audio, broadcast-RGB and
 * (for eDP) the panel scaling mode; any real state change falls through to
 * "done" which restores the mode on the attached crtc so it takes effect.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* no effective change -> no modeset needed */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* no effective change -> no modeset needed */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the new setting by redoing the modeset on the live crtc. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4964
 
4965
/*
 * Connector ->destroy() hook: free cached EDIDs, tear down the eDP panel
 * state if applicable, then release the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* edid may hold an ERR_PTR sentinel rather than an allocation */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4983
 
3243 Serge 4984
/*
 * Encoder ->destroy() hook: unregister the AUX channel, tear down MST
 * state, force panel VDD off for eDP, then free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled do to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
5005
 
5060 serge 5006
/*
 * Encoder suspend hook: for eDP, cancel the pending delayed-VDD-off work
 * and synchronously turn panel VDD off under the pps lock.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled do to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
5022
 
5354 serge 5023
/*
 * Reconcile driver state with a panel VDD bit the BIOS left enabled:
 * take the matching power-domain reference and schedule a vdd off.
 * Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
5047
 
5060 serge 5048
/*
 * Encoder ->reset() hook (boot/resume): re-read power sequencer state the
 * BIOS may have touched and sanitize a leftover VDD enable.  eDP only.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
5070
 
2330 Serge 5071
/* DRM connector vtable for DP/eDP connectors (atomic helpers for state). */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
5082
 
5083
/* Probe helpers: mode enumeration/validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
5088
 
5089
/* DRM encoder vtable for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
5093
 
6084 serge 5094
/*
 * Hotplug pulse handler for a DP digital port.  Long pulses re-probe the
 * sink (DPCD, OUI, MST capability) and retrain; short pulses service MST
 * ESI events or recheck link status.  Falls back out of MST mode via
 * "mst_fail" when the device has disappeared.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	/* AUX traffic below needs the port's AUX power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp)) {
			/* SST sink: just verify/retrain the link. */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5174
 
2327 Serge 5175
/* Return which DP Port should be selected for Transcoder DP control */
5176
int
2342 Serge 5177
intel_trans_dp_port_sel(struct drm_crtc *crtc)
2327 Serge 5178
{
5179
	struct drm_device *dev = crtc->dev;
3243 Serge 5180
	struct intel_encoder *intel_encoder;
5181
	struct intel_dp *intel_dp;
2327 Serge 5182
 
3243 Serge 5183
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5184
		intel_dp = enc_to_intel_dp(&intel_encoder->base);
2327 Serge 5185
 
3243 Serge 5186
		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5187
		    intel_encoder->type == INTEL_OUTPUT_EDP)
2327 Serge 5188
			return intel_dp->output_reg;
5189
	}
5190
 
5191
	return -1;
5192
}
2330 Serge 5193
 
6084 serge 5194
/* check the VBT to see whether the eDP is on another port */
4560 Serge 5195
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
2330 Serge 5196
{
5197
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 5198
	union child_device_config *p_child;
2330 Serge 5199
	int i;
4560 Serge 5200
	static const short port_mapping[] = {
6084 serge 5201
		[PORT_B] = DVO_PORT_DPB,
5202
		[PORT_C] = DVO_PORT_DPC,
5203
		[PORT_D] = DVO_PORT_DPD,
5204
		[PORT_E] = DVO_PORT_DPE,
4560 Serge 5205
	};
2330 Serge 5206
 
6084 serge 5207
	/*
5208
	 * eDP not supported on g4x. so bail out early just
5209
	 * for a bit extra safety in case the VBT is bonkers.
5210
	 */
5211
	if (INTEL_INFO(dev)->gen < 5)
5212
		return false;
5213
 
4560 Serge 5214
	if (port == PORT_A)
5215
		return true;
5216
 
4104 Serge 5217
	if (!dev_priv->vbt.child_dev_num)
2330 Serge 5218
		return false;
5219
 
4104 Serge 5220
	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5221
		p_child = dev_priv->vbt.child_dev + i;
2330 Serge 5222
 
4560 Serge 5223
		if (p_child->common.dvo_port == port_mapping[port] &&
5224
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5225
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
2330 Serge 5226
			return true;
5227
	}
5228
	return false;
5229
}
5230
 
5060 serge 5231
/*
 * Attach the standard DP connector properties (force-audio, broadcast
 * RGB) and, for eDP, the panel scaling-mode property with its default.
 */
void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
5249
 
5060 serge 5250
/*
 * Seed the panel power-sequencing timestamps with the current time so the
 * first power-on/off after driver load honours the mandated panel delays
 * (the wait helpers measure elapsed time against these fields).
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5256
 
3243 Serge 5257
/*
 * Compute the eDP panel power-sequencing delays (intel_dp->pps_delays and
 * the derived *_delay fields) from three sources: the values currently in
 * the PPS hardware registers, the VBT, and the eDP 1.3 spec upper limits.
 * For each delay the maximum of register and VBT wins; if both are zero we
 * fall back to the spec limit.  Must be called with pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Pick the per-platform PPS register addresses.  BXT folds the
	 * divisor/cycle-delay into the control register, so pp_div_reg
	 * stays 0 there. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* BXT stores the power-cycle delay in the control register in
	 * 100ms units, biased by one; everything else keeps it in the
	 * divisor register. */
	if (IS_BROXTON(dev)) {
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us register units to ms for the wait helpers. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5379
 
5380
/*
 * Program the panel power-sequencer hardware registers from the delays
 * previously computed in intel_dp->pps_delays, including the reference
 * divisor and (pre-HSW) the port selection bits.  Must be called with
 * pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT keeps the power-cycle delay in the control register
		 * (100ms units, +1 bias) via read-modify-write. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5467
 
6084 serge 5468
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* A request matching the panel's downclock mode selects the low RR;
	 * anything else selects the high RR. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen8+ (except CHV) switches RR by reprogramming link M/N values;
	 * gen7 uses the PIPECONF RR-mode-switch bit instead. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5571
 
5572
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 *
 * Initializes frontbuffer_bits and drrs.dp
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	/* Only one eDP panel may own DRRS at a time; drrs.dp != NULL means
	 * a previous enable was never balanced by a disable. */
	if (WARN_ON(dev_priv->drrs.dp)) {
		DRM_ERROR("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5604
 
5605
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 *
 * Restores the high refresh rate if the panel is currently downclocked,
 * releases drrs.dp, and synchronously cancels the pending downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!intel_crtc->config->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Switch back to the fixed (high) refresh rate before tearing down. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/* Cancelled outside the mutex: the work handler takes the same
	 * mutex, so cancelling _sync under it would deadlock. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5637
 
5638
/*
 * Delayed-work handler that downclocks the panel to its low refresh rate
 * after a period of screen idleness.  All state checks happen under
 * drrs.mutex because an invalidate can race with this work.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled since the work was scheduled. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
			intel_dp->attached_connector->panel.
			downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5060 serge 5667
 
6084 serge 5668
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called everytime rendering on the given planes start.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Rendering started, so postpone any pending downclock. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track bits belonging to the DRRS panel's pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5710
 
6084 serge 5711
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* NOTE(review): upstream cancels (and later reschedules) the
	 * idleness work here; this port has it commented out, presumably
	 * a KolibriOS workqueue limitation — confirm against upstream. */
//	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Rendering finished on these planes: clear their busy bits. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5755
 
5756
/**
5757
 * DOC: Display Refresh Rate Switching (DRRS)
5758
 *
5759
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5760
 * which enables switching between low and high refresh rates,
5761
 * dynamically, based on the usage scenario. This feature is applicable
5762
 * for internal panels.
5763
 *
5764
 * Indication that the panel supports DRRS is given by the panel EDID, which
5765
 * would list multiple refresh rates for one resolution.
5766
 *
5767
 * DRRS is of 2 types - static and seamless.
5768
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5769
 * (may appear as a blink on screen) and is used in dock-undock scenario.
5770
 * Seamless DRRS involves changing RR without any visual effect to the user
5771
 * and can be used during normal system usage. This is done by programming
5772
 * certain registers.
5773
 *
5774
 * Support for static/seamless DRRS may be indicated in the VBT based on
5775
 * inputs from the panel spec.
5776
 *
5777
 * DRRS saves power by switching to low RR based on usage scenarios.
5778
 *
5779
 * eDP DRRS:-
5780
 *        The implementation is based on frontbuffer tracking implementation.
5781
 * When there is a disturbance on the screen triggered by user activity or a
5782
 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5783
 * When there is no movement on screen, after a timeout of 1 second, a switch
5784
 * to low RR is made.
5785
 *        For integration with frontbuffer tracking code,
5786
 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5787
 *
5788
 * DRRS can be further extended to support other internal panels and also
5789
 * the scenario of video playback wherein RR is set based on the rate
5790
 * requested by userspace.
5791
 */
5792
 
5793
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/* Work and mutex are set up unconditionally so later enable/
	 * invalidate/flush calls are safe even when DRRS is unsupported. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* A second, lower refresh rate listed in the EDID for the fixed
	 * mode's resolution is what makes DRRS possible. */
	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5842
 
4104 Serge 5843
/*
 * Finish connector setup for eDP: sanitize any VDD left on by the BIOS,
 * read the DPCD to confirm a panel is really present, program the panel
 * power sequencer, cache the EDID, pick the fixed (and optional DRRS
 * downclock) mode, and initialize panel/backlight state.
 *
 * Returns false if the "panel" turns out to be a ghost (no DPCD), true
 * otherwise.  A no-op returning true for non-eDP connectors.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* EDID read but unusable: keep an error pointer so
			 * later consumers can tell "bad" from "absent". */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5947
 
5948
/*
 * Create and register the DRM connector for a DP/eDP digital port: select
 * the per-platform AUX vfuncs, decide DP vs eDP connector type, wire up
 * hotplug pin and panel-power/VDD work, init AUX and (where supported)
 * MST, then hand off to intel_edp_init_connector() for panel probing.
 *
 * Returns false (after tearing the connector back down) if the eDP panel
 * probe fails or the port/eDP combination is invalid on vlv/chv.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A-stepping routes port B hotplug to pin A (hw quirk). */
		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
3243 Serge 6094
 
6095
/*
 * Allocate and initialize a DP digital port: encoder, per-platform
 * enable/disable hooks, CRTC mask, hotplug pulse handler, and finally the
 * connector via intel_dp_init_connector().  On any failure all partially
 * constructed objects are torn down via the goto-cleanup chain and the
 * function returns silently (callers don't check for errors here).
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* CHV must be checked before VLV: IS_VALLEYVIEW() also matches it. */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* On CHV, port D is tied to pipe C; other ports use pipes A/B. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}
5060 serge 6171
 
6172
void intel_dp_mst_suspend(struct drm_device *dev)
6173
{
6174
	struct drm_i915_private *dev_priv = dev->dev_private;
6175
	int i;
6176
 
6177
	/* disable MST */
6178
	for (i = 0; i < I915_MAX_PORTS; i++) {
6084 serge 6179
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5060 serge 6180
		if (!intel_dig_port)
6181
			continue;
6182
 
6183
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6184
			if (!intel_dig_port->dp.can_mst)
6185
				continue;
6186
			if (intel_dig_port->dp.is_mst)
6187
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6188
		}
6189
	}
6190
}
6191
 
6192
void intel_dp_mst_resume(struct drm_device *dev)
6193
{
6194
	struct drm_i915_private *dev_priv = dev->dev_private;
6195
	int i;
6196
 
6197
	for (i = 0; i < I915_MAX_PORTS; i++) {
6084 serge 6198
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5060 serge 6199
		if (!intel_dig_port)
6200
			continue;
6201
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6202
			int ret;
6203
 
6204
			if (!intel_dig_port->dp.can_mst)
6205
				continue;
6206
 
6207
			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6208
			if (ret != 0) {
6209
				intel_dp_check_mst_status(&intel_dig_port->dp);
6210
			}
6211
		}
6212
	}
6213
}