Subversion Repositories Kolibri OS

Rev

Rev 6935 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2008 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *    Keith Packard <keithp@keithp.com>
25
 *
26
 */
27
 
28
/* NOTE(review): the original include targets were lost to HTML stripping
 * (angle brackets removed); reconstructed from the upstream kernel
 * intel_dp.c of the same era — verify against the build. */
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
40
 
5060 serge 41
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
2327 Serge 42
 
6084 serge 43
/* Compliance test status bits  */
44
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
45
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
46
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48
 
4560 Serge 49
/* Fixed DP link rate (kHz) -> platform DPLL divider settings. */
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

/* Gen4 (i965-class) DPLL settings for the two DP 1.1 link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH-split platforms (ILK+) use different divider values. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DPIO-based PLL settings. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Source-supported link rates (kHz) per platform family. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
98
 
2327 Serge 99
/**
100
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
101
 * @intel_dp: DP struct
102
 *
103
 * If a CPU or PCH DP output is attached to an eDP panel, this function
104
 * will return true, and false otherwise.
105
 */
106
static bool is_edp(struct intel_dp *intel_dp)
107
{
3243 Serge 108
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109
 
110
	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
2327 Serge 111
}
112
 
3243 Serge 113
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
2327 Serge 114
{
3243 Serge 115
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
116
 
117
	return intel_dig_port->base.base.dev;
2327 Serge 118
}
119
 
2330 Serge 120
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
121
{
3243 Serge 122
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2330 Serge 123
}
2327 Serge 124
 
2330 Serge 125
static void intel_dp_link_down(struct intel_dp *intel_dp);
5354 serge 126
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
5060 serge 127
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
5354 serge 128
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
129
static void vlv_steal_power_sequencer(struct drm_device *dev,
130
				      enum pipe pipe);
2330 Serge 131
 
6084 serge 132
/* Mask (within the 4-lane nibble) of lanes NOT used at this lane count. */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used = (1u << lane_count) - 1;

	return 0xf & ~used;
}
136
 
137
static int
138
intel_dp_max_link_bw(struct intel_dp  *intel_dp)
139
{
2330 Serge 140
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
2327 Serge 141
 
2330 Serge 142
	switch (max_link_bw) {
143
	case DP_LINK_BW_1_62:
144
	case DP_LINK_BW_2_7:
6084 serge 145
	case DP_LINK_BW_5_4:
2330 Serge 146
		break;
147
	default:
4104 Serge 148
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
149
		     max_link_bw);
2330 Serge 150
		max_link_bw = DP_LINK_BW_1_62;
151
		break;
152
	}
153
	return max_link_bw;
154
}
2327 Serge 155
 
5060 serge 156
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
157
{
158
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
159
	struct drm_device *dev = intel_dig_port->base.base.dev;
160
	u8 source_max, sink_max;
161
 
162
	source_max = 4;
163
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
164
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
165
		source_max = 2;
166
 
167
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
168
 
169
	return min(source_max, sink_max);
170
}
171
 
2342 Serge 172
/*
173
 * The units on the numbers in the next two are... bizarre.  Examples will
174
 * make it clearer; this one parallels an example in the eDP spec.
175
 *
176
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
177
 *
178
 *     270000 * 1 * 8 / 10 == 216000
179
 *
180
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
181
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
182
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
183
 * 119000.  At 18bpp that's 2142000 kilobits per second.
184
 *
185
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
186
 * get the result in decakilobits instead of kilobits.
187
 */
188
 
2330 Serge 189
/*
 * Required link bandwidth for a mode, in decakilobits/s (see the units
 * discussion above). Rounds up so fractional requirements still fit.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
2327 Serge 194
 
2330 Serge 195
/*
 * Max payload capacity of a link, in decakilobits/s: 8b/10b encoding
 * means only 8 of every 10 link bits carry data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_bits = max_link_clock * max_lanes;

	return link_bits * 8 / 10;
}
2327 Serge 200
 
4560 Serge 201
static enum drm_mode_status
2330 Serge 202
intel_dp_mode_valid(struct drm_connector *connector,
203
		    struct drm_display_mode *mode)
204
{
205
	struct intel_dp *intel_dp = intel_attached_dp(connector);
3243 Serge 206
	struct intel_connector *intel_connector = to_intel_connector(connector);
207
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
3746 Serge 208
	int target_clock = mode->clock;
209
	int max_rate, mode_rate, max_lanes, max_link_clock;
2327 Serge 210
 
3243 Serge 211
	if (is_edp(intel_dp) && fixed_mode) {
212
		if (mode->hdisplay > fixed_mode->hdisplay)
2330 Serge 213
			return MODE_PANEL;
2327 Serge 214
 
3243 Serge 215
		if (mode->vdisplay > fixed_mode->vdisplay)
2330 Serge 216
			return MODE_PANEL;
3746 Serge 217
 
218
		target_clock = fixed_mode->clock;
2330 Serge 219
	}
2327 Serge 220
 
6084 serge 221
	max_link_clock = intel_dp_max_link_rate(intel_dp);
5060 serge 222
	max_lanes = intel_dp_max_lane_count(intel_dp);
3746 Serge 223
 
224
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
225
	mode_rate = intel_dp_link_required(target_clock, 18);
226
 
227
	if (mode_rate > max_rate)
2330 Serge 228
		return MODE_CLOCK_HIGH;
2327 Serge 229
 
2330 Serge 230
	if (mode->clock < 10000)
231
		return MODE_CLOCK_LOW;
232
 
3031 serge 233
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
234
		return MODE_H_ILLEGAL;
235
 
2330 Serge 236
	return MODE_OK;
237
}
238
 
5354 serge 239
/* Pack up to 4 bytes big-endian into an AUX data register value. */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int count = src_bytes > 4 ? 4 : src_bytes;
	int i;

	/* src[0] lands in the most significant byte. */
	for (i = 0; i < count; i++)
		v |= (uint32_t)src[i] << (8 * (3 - i));

	return v;
}
250
 
6084 serge 251
/* Unpack an AUX data register into up to 4 bytes, big-endian first. */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int count = dst_bytes > 4 ? 4 : dst_bytes;
	int i;

	for (i = 0; i < count; i++)
		dst[i] = (uint8_t)(src >> (8 * (3 - i)));
}
259
 
4560 Serge 260
static void
261
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5354 serge 262
				    struct intel_dp *intel_dp);
4560 Serge 263
static void
264
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5354 serge 265
					      struct intel_dp *intel_dp);
4560 Serge 266
 
5354 serge 267
static void pps_lock(struct intel_dp *intel_dp)
268
{
269
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
270
	struct intel_encoder *encoder = &intel_dig_port->base;
271
	struct drm_device *dev = encoder->base.dev;
272
	struct drm_i915_private *dev_priv = dev->dev_private;
273
	enum intel_display_power_domain power_domain;
274
 
275
	/*
276
	 * See vlv_power_sequencer_reset() why we need
277
	 * a power domain reference here.
278
	 */
6084 serge 279
	power_domain = intel_display_port_aux_power_domain(encoder);
5354 serge 280
	intel_display_power_get(dev_priv, power_domain);
281
 
282
	mutex_lock(&dev_priv->pps_mutex);
283
}
284
 
285
static void pps_unlock(struct intel_dp *intel_dp)
286
{
287
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
288
	struct intel_encoder *encoder = &intel_dig_port->base;
289
	struct drm_device *dev = encoder->base.dev;
290
	struct drm_i915_private *dev_priv = dev->dev_private;
291
	enum intel_display_power_domain power_domain;
292
 
293
	mutex_unlock(&dev_priv->pps_mutex);
294
 
6084 serge 295
	power_domain = intel_display_port_aux_power_domain(encoder);
5354 serge 296
	intel_display_power_put(dev_priv, power_domain);
297
}
298
 
299
/*
 * Force the VLV/CHV panel power sequencer to latch onto this port by
 * briefly enabling and disabling the port with a minimal configuration.
 * Without this kick even the VDD force bit has no effect.
 * Requires intel_dp->pps_pipe to already be assigned.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	/* The trick below would glitch an already-enabled port; bail out. */
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled) {
		/* On CHV the PHY channel may need to be powered up first. */
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	/* Undo the temporary PLL enable / PHY powergate override. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
368
 
4560 Serge 369
/*
 * Return (allocating if necessary) the power sequencer pipe for this
 * eDP port. Must be called with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
431
 
432
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
433
			       enum pipe pipe);
434
 
435
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
436
			       enum pipe pipe)
437
{
438
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
439
}
440
 
441
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
442
				enum pipe pipe)
443
{
444
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
445
}
446
 
447
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
448
			 enum pipe pipe)
449
{
450
	return true;
451
}
452
 
453
static enum pipe
454
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
455
		     enum port port,
456
		     vlv_pipe_check pipe_check)
457
{
458
	enum pipe pipe;
459
 
4560 Serge 460
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
461
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
462
			PANEL_PORT_SELECT_MASK;
5354 serge 463
 
464
		if (port_sel != PANEL_PORT_SELECT_VLV(port))
465
			continue;
466
 
467
		if (!pipe_check(dev_priv, pipe))
468
			continue;
469
 
6084 serge 470
		return pipe;
4560 Serge 471
	}
472
 
5354 serge 473
	return INVALID_PIPE;
4560 Serge 474
}
475
 
5354 serge 476
/*
 * At init time, adopt whichever power sequencer the BIOS left pointing
 * at this port, preferring one that is actively powering the panel.
 * Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
512
 
513
/*
 * Forget all power sequencer assignments (e.g. after a power-well
 * reset); they will be re-picked lazily by vlv_power_sequencer_pipe().
 * VLV/CHV only.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
541
 
6937 serge 542
/* Platform-appropriate panel power control register for this port. */
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		/* VLV/CHV: per-pipe register; may allocate a sequencer. */
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}
554
 
6937 serge 555
/* Platform-appropriate panel power status register for this port. */
static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		/* VLV/CHV: per-pipe register; may allocate a sequencer. */
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
567
 
5354 serge 568
#if 0
569
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
570
   This function only applicable when panel PM state is not to be tracked */
571
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
572
			      void *unused)
573
{
574
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
575
						 edp_notifier);
576
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
577
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 578
 
5354 serge 579
	if (!is_edp(intel_dp) || code != SYS_RESTART)
580
		return 0;
581
 
582
	pps_lock(intel_dp);
583
 
6937 serge 584
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5354 serge 585
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
6937 serge 586
		i915_reg_t pp_ctrl_reg, pp_div_reg;
6084 serge 587
		u32 pp_div;
5354 serge 588
 
589
		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
590
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
591
		pp_div = I915_READ(pp_div_reg);
592
		pp_div &= PP_REFERENCE_DIVIDER_MASK;
593
 
594
		/* 0x1F write to PP_DIV_REG sets max cycle delay */
595
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
596
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
597
		msleep(intel_dp->panel_power_cycle_delay);
598
	}
599
 
600
	pps_unlock(intel_dp);
601
 
602
	return 0;
603
}
604
#endif
605
 
5060 serge 606
/* Is panel power on? Requires pps_mutex held. */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* No sequencer assigned yet on VLV/CHV implies the panel is off. */
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
619
 
5060 serge 620
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
2342 Serge 621
{
3243 Serge 622
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2342 Serge 623
	struct drm_i915_private *dev_priv = dev->dev_private;
624
 
5354 serge 625
	lockdep_assert_held(&dev_priv->pps_mutex);
626
 
6937 serge 627
	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5354 serge 628
	    intel_dp->pps_pipe == INVALID_PIPE)
629
		return false;
630
 
631
	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
2342 Serge 632
}
633
 
634
/* Warn if AUX is attempted while the eDP panel has neither power nor VDD. */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
650
 
3480 Serge 651
/*
 * Wait for the AUX channel SEND_BUSY bit to clear, either via the AUX
 * done interrupt or by polling, and return the final channel status.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads the status register and tests the busy bit each evaluation. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
674
 
5060 serge 675
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
4104 Serge 676
{
677
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
678
	struct drm_device *dev = intel_dig_port->base.base.dev;
679
 
5060 serge 680
	/*
681
	 * The clock divider is based off the hrawclk, and would like to run at
682
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
4104 Serge 683
	 */
6937 serge 684
	return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
5060 serge 685
}
686
 
687
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
688
{
689
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
690
	struct drm_device *dev = intel_dig_port->base.base.dev;
6084 serge 691
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 692
 
693
	if (index)
694
		return 0;
695
 
696
	if (intel_dig_port->port == PORT_A) {
6937 serge 697
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
6084 serge 698
 
5060 serge 699
	} else {
6937 serge 700
		return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
5060 serge 701
	}
702
}
703
 
704
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
705
{
706
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
707
	struct drm_device *dev = intel_dig_port->base.base.dev;
708
	struct drm_i915_private *dev_priv = dev->dev_private;
709
 
710
	if (intel_dig_port->port == PORT_A) {
711
		if (index)
712
			return 0;
6084 serge 713
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
6937 serge 714
	} else if (HAS_PCH_LPT_H(dev_priv)) {
4104 Serge 715
		/* Workaround for non-ULT HSW */
716
		switch (index) {
717
		case 0: return 63;
718
		case 1: return 72;
719
		default: return 0;
720
		}
5060 serge 721
	} else  {
6937 serge 722
		return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
4104 Serge 723
	}
724
}
725
 
5060 serge 726
/* VLV uses a fixed AUX divider of 100; only one divider to try. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
730
 
5354 serge 731
/*
 * SKL doesn't need us to program the AUX clock divider (Hardware will
 * derive the clock from CDCLK automatically). We still implement the
 * get_aux_clock_divider vfunc to plug-in into the existing code.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 1;
}
740
 
5060 serge 741
/* Build the AUX_CH_CTL value that starts a transaction on pre-SKL HW. */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	/* Gen6 uses a shorter precharge than later gens. */
	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW port A needs the longer 600us timeout. */
	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
770
 
5354 serge 771
/* SKL+ variant: no divider/precharge fields; fixed 1600us timeout. */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
785
 
2330 Serge 786
/*
 * Perform one raw AUX channel transaction: write @send_bytes from @send,
 * read up to @recv_size bytes into @recv. Returns bytes received or a
 * negative errno. Takes pps_lock and (for eDP) VDD for the duration.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Only warn once per distinct stuck status value. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
955
 
5060 serge 956
/* An AUX header is a 3-byte address plus one command/length byte. */
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
/*
 * drm_dp_aux transfer hook: marshal a drm_dp_aux_msg into the raw byte
 * format expected by intel_dp_aux_ch() and unpack the reply.
 *
 * Returns the payload size on success (bytes read, or bytes accepted by a
 * write), or a negative errno. The sink's reply code is stored in
 * msg->reply; callers must check it before trusting the payload.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Build the 4-byte AUX header: request nibble + 20-bit address +
	 * (length - 1). */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	/* Dispatch on the request type with the MOT (middle-of-transaction)
	 * bit masked off. */
	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-size message is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* Reply carries one status byte ahead of the payload. */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1027
 
6937 serge 1028
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1029
				       enum port port)
2330 Serge 1030
{
6937 serge 1031
	switch (port) {
1032
	case PORT_B:
1033
	case PORT_C:
1034
	case PORT_D:
1035
		return DP_AUX_CH_CTL(port);
1036
	default:
1037
		MISSING_CASE(port);
1038
		return DP_AUX_CH_CTL(PORT_B);
1039
	}
1040
}
2330 Serge 1041
 
6937 serge 1042
static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1043
					enum port port, int index)
1044
{
1045
	switch (port) {
1046
	case PORT_B:
1047
	case PORT_C:
1048
	case PORT_D:
1049
		return DP_AUX_CH_DATA(port, index);
1050
	default:
1051
		MISSING_CASE(port);
1052
		return DP_AUX_CH_DATA(PORT_B, index);
1053
	}
1054
}
1055
 
1056
static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1057
				       enum port port)
1058
{
1059
	switch (port) {
1060
	case PORT_A:
1061
		return DP_AUX_CH_CTL(port);
1062
	case PORT_B:
1063
	case PORT_C:
1064
	case PORT_D:
1065
		return PCH_DP_AUX_CH_CTL(port);
1066
	default:
1067
		MISSING_CASE(port);
1068
		return DP_AUX_CH_CTL(PORT_A);
1069
	}
1070
}
1071
 
1072
static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1073
					enum port port, int index)
1074
{
1075
	switch (port) {
1076
	case PORT_A:
1077
		return DP_AUX_CH_DATA(port, index);
1078
	case PORT_B:
1079
	case PORT_C:
1080
	case PORT_D:
1081
		return PCH_DP_AUX_CH_DATA(port, index);
1082
	default:
1083
		MISSING_CASE(port);
1084
		return DP_AUX_CH_DATA(PORT_A, index);
1085
	}
1086
}
1087
 
1088
/*
1089
 * On SKL we don't have Aux for port E so we rely
1090
 * on VBT to set a proper alternate aux channel.
1091
 */
1092
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1093
{
1094
	const struct ddi_vbt_port_info *info =
1095
		&dev_priv->vbt.ddi_port_info[PORT_E];
1096
 
6084 serge 1097
		switch (info->alternate_aux_channel) {
6937 serge 1098
	case DP_AUX_A:
1099
		return PORT_A;
6084 serge 1100
		case DP_AUX_B:
6937 serge 1101
		return PORT_B;
6084 serge 1102
		case DP_AUX_C:
6937 serge 1103
		return PORT_C;
6084 serge 1104
		case DP_AUX_D:
6937 serge 1105
		return PORT_D;
1106
	default:
1107
		MISSING_CASE(info->alternate_aux_channel);
1108
		return PORT_A;
1109
	}
1110
}
1111
 
1112
static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1113
				       enum port port)
1114
{
1115
	if (port == PORT_E)
1116
		port = skl_porte_aux_port(dev_priv);
1117
 
1118
	switch (port) {
1119
	case PORT_A:
1120
	case PORT_B:
1121
	case PORT_C:
1122
	case PORT_D:
1123
		return DP_AUX_CH_CTL(port);
6084 serge 1124
		default:
6937 serge 1125
		MISSING_CASE(port);
1126
		return DP_AUX_CH_CTL(PORT_A);
6084 serge 1127
		}
1128
	}
1129
 
6937 serge 1130
static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1131
					enum port port, int index)
1132
{
1133
	if (port == PORT_E)
1134
		port = skl_porte_aux_port(dev_priv);
1135
 
5060 serge 1136
	switch (port) {
1137
	case PORT_A:
1138
	case PORT_B:
1139
	case PORT_C:
1140
	case PORT_D:
6937 serge 1141
		return DP_AUX_CH_DATA(port, index);
2330 Serge 1142
	default:
6937 serge 1143
		MISSING_CASE(port);
1144
		return DP_AUX_CH_DATA(PORT_A, index);
2330 Serge 1145
	}
6937 serge 1146
}
2330 Serge 1147
 
6937 serge 1148
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1149
					 enum port port)
1150
{
1151
	if (INTEL_INFO(dev_priv)->gen >= 9)
1152
		return skl_aux_ctl_reg(dev_priv, port);
1153
	else if (HAS_PCH_SPLIT(dev_priv))
1154
		return ilk_aux_ctl_reg(dev_priv, port);
1155
	else
1156
		return g4x_aux_ctl_reg(dev_priv, port);
1157
}
2330 Serge 1158
 
6937 serge 1159
static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1160
					  enum port port, int index)
1161
{
1162
	if (INTEL_INFO(dev_priv)->gen >= 9)
1163
		return skl_aux_data_reg(dev_priv, port, index);
1164
	else if (HAS_PCH_SPLIT(dev_priv))
1165
		return ilk_aux_data_reg(dev_priv, port, index);
1166
	else
1167
		return g4x_aux_data_reg(dev_priv, port, index);
1168
}
1169
 
1170
/*
 * Cache the AUX control and data register offsets for this port in the
 * intel_dp struct, so the AUX transfer path doesn't re-derive them on
 * every access.
 */
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}
1180
 
1181
/*
 * Unregister the DRM AUX channel and free the name string allocated by
 * intel_dp_aux_init().
 */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}
1187
 
1188
/*
 * Set up the DP AUX channel for this port: cache register offsets,
 * allocate a "DPDDC-<port>" name, register the drm_dp_aux with the core,
 * and expose a sysfs link from the connector to the AUX/DDC device.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound (name freed, aux unregistered).
 */
static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	int ret;

	intel_aux_reg_init(intel_dp);

	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	if (!intel_dp->aux.name)
		return -ENOMEM;

	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		/* aux was never registered, so only the name needs freeing. */
		kfree(intel_dp->aux.name);
		return ret;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		/* Full unwind: unregister the aux and free the name. */
		intel_dp_aux_fini(intel_dp);
		return ret;
	}

	return 0;
}
2330 Serge 1229
 
5060 serge 1230
/*
 * Connector unregister hook: drop the connector->AUX sysfs link created in
 * intel_dp_aux_init() and then perform the generic connector unregister.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	/* NOTE(review): MST connectors are skipped here — presumably they
	 * never had the sysfs link created; confirm against the MST
	 * connector init path. */
	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1240
 
5060 serge 1241
static void
6084 serge 1242
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
5354 serge 1243
{
1244
	u32 ctrl1;
1245
 
6084 serge 1246
	memset(&pipe_config->dpll_hw_state, 0,
1247
	       sizeof(pipe_config->dpll_hw_state));
1248
 
5354 serge 1249
	pipe_config->ddi_pll_sel = SKL_DPLL0;
1250
	pipe_config->dpll_hw_state.cfgcr1 = 0;
1251
	pipe_config->dpll_hw_state.cfgcr2 = 0;
1252
 
1253
	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
6084 serge 1254
	switch (pipe_config->port_clock / 2) {
1255
	case 81000:
1256
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5354 serge 1257
					      SKL_DPLL0);
1258
		break;
6084 serge 1259
	case 135000:
1260
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
5354 serge 1261
					      SKL_DPLL0);
1262
		break;
6084 serge 1263
	case 270000:
1264
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
5354 serge 1265
					      SKL_DPLL0);
1266
		break;
6084 serge 1267
	case 162000:
1268
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1269
					      SKL_DPLL0);
1270
		break;
1271
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1272
	results in CDCLK change. Need to handle the change of CDCLK by
1273
	disabling pipes and re-enabling them */
1274
	case 108000:
1275
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1276
					      SKL_DPLL0);
1277
		break;
1278
	case 216000:
1279
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1280
					      SKL_DPLL0);
1281
		break;
1282
 
5354 serge 1283
	}
1284
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1285
}
1286
 
6084 serge 1287
void
1288
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
2330 Serge 1289
{
6084 serge 1290
	memset(&pipe_config->dpll_hw_state, 0,
1291
	       sizeof(pipe_config->dpll_hw_state));
1292
 
1293
	switch (pipe_config->port_clock / 2) {
1294
	case 81000:
5060 serge 1295
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1296
		break;
6084 serge 1297
	case 135000:
5060 serge 1298
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1299
		break;
6084 serge 1300
	case 270000:
5060 serge 1301
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1302
		break;
1303
	}
2330 Serge 1304
}
1305
 
6084 serge 1306
static int
1307
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1308
{
1309
	if (intel_dp->num_sink_rates) {
1310
		*sink_rates = intel_dp->sink_rates;
1311
		return intel_dp->num_sink_rates;
1312
	}
1313
 
1314
	*sink_rates = default_rates;
1315
 
1316
	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1317
}
1318
 
6937 serge 1319
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
6084 serge 1320
{
6937 serge 1321
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1322
	struct drm_device *dev = dig_port->base.base.dev;
1323
 
6084 serge 1324
	/* WaDisableHBR2:skl */
6937 serge 1325
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
6084 serge 1326
		return false;
1327
 
1328
	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1329
	    (INTEL_INFO(dev)->gen >= 9))
1330
		return true;
1331
	else
1332
		return false;
1333
}
1334
 
1335
static int
6937 serge 1336
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
6084 serge 1337
{
6937 serge 1338
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1339
	struct drm_device *dev = dig_port->base.base.dev;
6084 serge 1340
	int size;
1341
 
1342
	if (IS_BROXTON(dev)) {
1343
		*source_rates = bxt_rates;
1344
		size = ARRAY_SIZE(bxt_rates);
6937 serge 1345
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
6084 serge 1346
		*source_rates = skl_rates;
1347
		size = ARRAY_SIZE(skl_rates);
1348
	} else {
1349
		*source_rates = default_rates;
1350
		size = ARRAY_SIZE(default_rates);
1351
	}
1352
 
1353
	/* This depends on the fact that 5.4 is last value in the array */
6937 serge 1354
	if (!intel_dp_source_supports_hbr2(intel_dp))
6084 serge 1355
		size--;
1356
 
1357
	return size;
1358
}
1359
 
4104 Serge 1360
/*
 * For pre-DDI platforms, look up the fixed DPLL divisor matching the
 * requested port clock and store it in the crtc state. If no table entry
 * matches, clock_set stays false and the generic clock computation is
 * used instead.
 */
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	/* Pick the per-platform divisor table. */
	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}
1392
 
6084 serge 1393
static int intersect_rates(const int *source_rates, int source_len,
1394
			   const int *sink_rates, int sink_len,
1395
			   int *common_rates)
1396
{
1397
	int i = 0, j = 0, k = 0;
1398
 
1399
	while (i < source_len && j < sink_len) {
1400
		if (source_rates[i] == sink_rates[j]) {
1401
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1402
				return k;
1403
			common_rates[k] = source_rates[i];
1404
			++k;
1405
			++i;
1406
			++j;
1407
		} else if (source_rates[i] < sink_rates[j]) {
1408
			++i;
1409
		} else {
1410
			++j;
1411
		}
1412
	}
1413
	return k;
1414
}
1415
 
1416
/*
 * Compute the link rates supported by both the source and the sink.
 * Fills common_rates (ascending) and returns the number of entries.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
1429
 
1430
/*
 * Format an int array as a comma-separated string into str (capacity len).
 * Stops quietly on truncation or snprintf failure; str is always
 * NUL-terminated (for len > 0).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		/* Check failure (r < 0) explicitly instead of relying on the
		 * implicit signed->unsigned conversion in `r >= len`; also
		 * bail on truncation so str/len never advance past the end. */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1445
 
1446
/*
 * Debug helper: log the source, sink and common link-rate tables.
 * Early-outs when KMS debugging is disabled to avoid the formatting cost.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1468
 
1469
static int rate_to_index(int find, const int *rates)
1470
{
1471
	int i = 0;
1472
 
1473
	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1474
		if (find == rates[i])
1475
			break;
1476
 
1477
	return i;
1478
}
1479
 
1480
/*
 * Highest link rate supported by both source and sink, in kHz.
 * Falls back to RBR (162000) if no common rate exists (shouldn't happen).
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	/* rate_to_index(0, rates) finds the first zero (unused) slot, i.e.
	 * the number of valid entries; the entry before it is the maximum
	 * since the array is sorted ascending. */
	return rates[rate_to_index(0, rates) - 1];
}
1492
 
1493
/* Index of `rate` in the sink's rate table (for DP_LINK_RATE_SET). */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1497
 
6937 serge 1498
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
6084 serge 1499
				  uint8_t *link_bw, uint8_t *rate_select)
1500
{
1501
	if (intel_dp->num_sink_rates) {
1502
		*link_bw = 0;
1503
		*rate_select =
1504
			intel_dp_rate_select(intel_dp, port_clock);
1505
	} else {
1506
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1507
		*rate_select = 0;
1508
	}
1509
}
1510
 
3243 Serge 1511
/*
 * Encoder compute_config hook for DP/eDP: pick the lowest bpp/link-rate/
 * lane-count combination that carries the requested mode, then fill in the
 * derived crtc state (color range, m/n values, PLL selection).
 *
 * Returns true when a working configuration was found, false otherwise.
 */
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	/* eDP panels with a fixed mode: force that mode and set up the
	 * panel fitter to scale the requested mode into it. */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			/* NOTE(review): a non-zero errno is returned from a
			 * bool function here and becomes `true` to callers —
			 * matches upstream of this era, but worth confirming. */
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH_DISPLAY(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
			(dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	/* Search order: highest bpp first, then lowest clock, then fewest
	 * lanes — the first combination with enough bandwidth wins. */
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
				lane_count <= max_lane_count;
				lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
	} else {
		pipe_config->limited_color_range =
			intel_dp->limited_color_range;
	}

	pipe_config->lane_count = lane_count;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
			      &link_bw, &rate_select);

	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
		      link_bw, rate_select, pipe_config->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	/* Second m/n pair for the DRRS downclocked refresh rate. */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	/* PLL selection is platform specific; BXT handles it in the DDI code. */
	if ((IS_SKYLAKE(dev)  || IS_KABYLAKE(dev)) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config);
	else
		intel_dp_set_clock(encoder, pipe_config);

	return true;
}
1674
 
6084 serge 1675
/* Latch the negotiated link rate and lane count from the crtc state. */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1681
 
5060 serge 1682
/*
 * Build the DP port register value (intel_dp->DP) for the upcoming
 * modeset: common voltage/lane bits plus the per-platform sync, framing
 * and pipe-select bits. Nothing is written to the port register here
 * except the CPT TRANS_DP_CTL update.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: sync polarity and pipe select in the port reg. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT PCH ports: enhanced framing lives in TRANS_DP_CTL. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU / VLV / CHV format. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
2327 Serge 1763
 
5060 serge 1764
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
1765
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
2342 Serge 1766
 
5060 serge 1767
#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
1768
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)
2342 Serge 1769
 
5060 serge 1770
#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1771
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
2342 Serge 1772
 
5060 serge 1773
/*
 * Poll the eDP panel power status register until (status & mask) == value,
 * with a 5 second timeout. Logs (but does not propagate) a timeout.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000 ms total, polling every 10 ms. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1799
 
5060 serge 1800
/* Block until the panel power sequencer reports the panel fully on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1805
 
5060 serge 1806
/* Block until the panel power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1811
 
5060 serge 1812
/*
 * Block until the panel's mandatory power-cycle delay has elapsed and the
 * power sequencer has gone idle.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1823
 
5060 serge 1824
/* Honor the panel's power-on -> backlight-on delay. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
2342 Serge 1829
 
5060 serge 1830
/* Honor the panel's backlight-off -> power-down delay. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
1835
 
2342 Serge 1836
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	/* Caller must hold pps_mutex (panel power sequencer state). */
	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/* BXT has no register-unlock key; everywhere else replace the lock
	 * field with the unlock magic so subsequent writes take effect. */
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1855
 
5354 serge 1856
/*
1857
 * Must be paired with edp_panel_vdd_off().
1858
 * Must hold pps_mutex around the whole on/off sequence.
1859
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1860
 */
1861
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2330 Serge 1862
{
3243 Serge 1863
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5060 serge 1864
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1865
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
2330 Serge 1866
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 1867
	enum intel_display_power_domain power_domain;
2330 Serge 1868
	u32 pp;
6937 serge 1869
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
5060 serge 1870
	bool need_to_disable = !intel_dp->want_panel_vdd;
2327 Serge 1871
 
5354 serge 1872
	lockdep_assert_held(&dev_priv->pps_mutex);
1873
 
2342 Serge 1874
	if (!is_edp(intel_dp))
5060 serge 1875
		return false;
2327 Serge 1876
 
6937 serge 1877
//	cancel_delayed_work(&intel_dp->panel_vdd_work);
2342 Serge 1878
	intel_dp->want_panel_vdd = true;
1879
 
5060 serge 1880
	if (edp_have_panel_vdd(intel_dp))
1881
		return need_to_disable;
2342 Serge 1882
 
6084 serge 1883
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
5060 serge 1884
	intel_display_power_get(dev_priv, power_domain);
4560 Serge 1885
 
5354 serge 1886
	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1887
		      port_name(intel_dig_port->port));
4560 Serge 1888
 
5060 serge 1889
	if (!edp_have_panel_power(intel_dp))
1890
		wait_panel_power_cycle(intel_dp);
2342 Serge 1891
 
3746 Serge 1892
	pp = ironlake_get_pp_control(intel_dp);
2330 Serge 1893
	pp |= EDP_FORCE_VDD;
2342 Serge 1894
 
4560 Serge 1895
	pp_stat_reg = _pp_stat_reg(intel_dp);
1896
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3746 Serge 1897
 
1898
	I915_WRITE(pp_ctrl_reg, pp);
1899
	POSTING_READ(pp_ctrl_reg);
1900
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1901
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2342 Serge 1902
	/*
1903
	 * If the panel wasn't on, delay before accessing aux channel
1904
	 */
5060 serge 1905
	if (!edp_have_panel_power(intel_dp)) {
5354 serge 1906
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1907
			      port_name(intel_dig_port->port));
2342 Serge 1908
		msleep(intel_dp->panel_power_up_delay);
1909
	}
5060 serge 1910
 
1911
	return need_to_disable;
2330 Serge 1912
}
2327 Serge 1913
 
5354 serge 1914
/*
1915
 * Must be paired with intel_edp_panel_vdd_off() or
1916
 * intel_edp_panel_off().
1917
 * Nested calls to these functions are not allowed since
1918
 * we drop the lock. Caller must use some higher level
1919
 * locking to prevent nested calls from other threads.
1920
 */
5060 serge 1921
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2330 Serge 1922
{
5354 serge 1923
	bool vdd;
5060 serge 1924
 
5354 serge 1925
	if (!is_edp(intel_dp))
1926
		return;
1927
 
1928
	pps_lock(intel_dp);
1929
	vdd = edp_panel_vdd_on(intel_dp);
1930
	pps_unlock(intel_dp);
1931
 
6084 serge 1932
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
5354 serge 1933
	     port_name(dp_to_dig_port(intel_dp)->port));
5060 serge 1934
}
1935
 
1936
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1937
{
3243 Serge 1938
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2330 Serge 1939
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 1940
	struct intel_digital_port *intel_dig_port =
1941
		dp_to_dig_port(intel_dp);
1942
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1943
	enum intel_display_power_domain power_domain;
2330 Serge 1944
	u32 pp;
6937 serge 1945
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
2327 Serge 1946
 
5354 serge 1947
	lockdep_assert_held(&dev_priv->pps_mutex);
3480 Serge 1948
 
5354 serge 1949
	WARN_ON(intel_dp->want_panel_vdd);
5060 serge 1950
 
5354 serge 1951
	if (!edp_have_panel_vdd(intel_dp))
1952
		return;
4560 Serge 1953
 
5354 serge 1954
	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1955
		      port_name(intel_dig_port->port));
1956
 
6084 serge 1957
	pp = ironlake_get_pp_control(intel_dp);
2330 Serge 1958
	pp &= ~EDP_FORCE_VDD;
2327 Serge 1959
 
6084 serge 1960
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1961
	pp_stat_reg = _pp_stat_reg(intel_dp);
3746 Serge 1962
 
6084 serge 1963
	I915_WRITE(pp_ctrl_reg, pp);
1964
	POSTING_READ(pp_ctrl_reg);
3746 Serge 1965
 
2330 Serge 1966
	/* Make sure sequencer is idle before allowing subsequent activity */
6084 serge 1967
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1968
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
4560 Serge 1969
 
6084 serge 1970
	if ((pp & POWER_TARGET_ON) == 0)
1971
		intel_dp->last_power_cycle = jiffies;
4560 Serge 1972
 
6084 serge 1973
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
1974
	intel_display_power_put(dev_priv, power_domain);
2330 Serge 1975
}
2327 Serge 1976
 
5060 serge 1977
static void edp_panel_vdd_work(struct work_struct *__work)
3243 Serge 1978
{
3482 Serge 1979
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1980
						 struct intel_dp, panel_vdd_work);
1981
 
5354 serge 1982
	pps_lock(intel_dp);
1983
	if (!intel_dp->want_panel_vdd)
6084 serge 1984
		edp_panel_vdd_off_sync(intel_dp);
5354 serge 1985
	pps_unlock(intel_dp);
3243 Serge 1986
}
2342 Serge 1987
 
5060 serge 1988
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2330 Serge 1989
{
5060 serge 1990
	unsigned long delay;
1991
 
1992
	/*
1993
	 * Queue the timer to fire a long time from now (relative to the power
1994
	 * down delay) to keep the panel power up across a sequence of
1995
	 * operations.
1996
	 */
1997
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
6320 serge 1998
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
5060 serge 1999
}
2000
 
5354 serge 2001
/*
2002
 * Must be paired with edp_panel_vdd_on().
2003
 * Must hold pps_mutex around the whole on/off sequence.
2004
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2005
 */
5060 serge 2006
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2007
{
5354 serge 2008
	struct drm_i915_private *dev_priv =
2009
		intel_dp_to_dev(intel_dp)->dev_private;
2010
 
2011
	lockdep_assert_held(&dev_priv->pps_mutex);
2012
 
2342 Serge 2013
	if (!is_edp(intel_dp))
2014
		return;
2015
 
6084 serge 2016
	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
5354 serge 2017
	     port_name(dp_to_dig_port(intel_dp)->port));
2342 Serge 2018
 
2019
	intel_dp->want_panel_vdd = false;
2020
 
5060 serge 2021
	if (sync)
2022
		edp_panel_vdd_off_sync(intel_dp);
2023
	else
2024
		edp_panel_vdd_schedule_off(intel_dp);
2342 Serge 2025
}
2026
 
5354 serge 2027
static void edp_panel_on(struct intel_dp *intel_dp)
2342 Serge 2028
{
3243 Serge 2029
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2330 Serge 2030
	struct drm_i915_private *dev_priv = dev->dev_private;
2342 Serge 2031
	u32 pp;
6937 serge 2032
	i915_reg_t pp_ctrl_reg;
2327 Serge 2033
 
5354 serge 2034
	lockdep_assert_held(&dev_priv->pps_mutex);
2035
 
2342 Serge 2036
	if (!is_edp(intel_dp))
2037
		return;
2327 Serge 2038
 
5354 serge 2039
	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2040
		      port_name(dp_to_dig_port(intel_dp)->port));
2327 Serge 2041
 
5354 serge 2042
	if (WARN(edp_have_panel_power(intel_dp),
2043
		 "eDP port %c panel power already on\n",
2044
		 port_name(dp_to_dig_port(intel_dp)->port)))
2342 Serge 2045
		return;
2046
 
5060 serge 2047
	wait_panel_power_cycle(intel_dp);
2342 Serge 2048
 
4560 Serge 2049
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3746 Serge 2050
	pp = ironlake_get_pp_control(intel_dp);
2342 Serge 2051
	if (IS_GEN5(dev)) {
6084 serge 2052
		/* ILK workaround: disable reset around power sequence */
2053
		pp &= ~PANEL_POWER_RESET;
4560 Serge 2054
		I915_WRITE(pp_ctrl_reg, pp);
2055
		POSTING_READ(pp_ctrl_reg);
2342 Serge 2056
	}
2327 Serge 2057
 
2342 Serge 2058
	pp |= POWER_TARGET_ON;
2059
	if (!IS_GEN5(dev))
2060
		pp |= PANEL_POWER_RESET;
2061
 
3746 Serge 2062
	I915_WRITE(pp_ctrl_reg, pp);
2063
	POSTING_READ(pp_ctrl_reg);
2064
 
5060 serge 2065
	wait_panel_on(intel_dp);
2066
	intel_dp->last_power_on = jiffies;
2327 Serge 2067
 
2342 Serge 2068
	if (IS_GEN5(dev)) {
6084 serge 2069
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
4560 Serge 2070
		I915_WRITE(pp_ctrl_reg, pp);
2071
		POSTING_READ(pp_ctrl_reg);
2342 Serge 2072
	}
2330 Serge 2073
}
2327 Serge 2074
 
5354 serge 2075
/* Locked wrapper around edp_panel_on(); safe no-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2084
 
2085
 
2086
static void edp_panel_off(struct intel_dp *intel_dp)
2087
{
5060 serge 2088
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2089
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
3243 Serge 2090
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2330 Serge 2091
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 2092
	enum intel_display_power_domain power_domain;
2342 Serge 2093
	u32 pp;
6937 serge 2094
	i915_reg_t pp_ctrl_reg;
2327 Serge 2095
 
5354 serge 2096
	lockdep_assert_held(&dev_priv->pps_mutex);
2097
 
2342 Serge 2098
	if (!is_edp(intel_dp))
2099
		return;
2327 Serge 2100
 
5354 serge 2101
	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2102
		      port_name(dp_to_dig_port(intel_dp)->port));
2327 Serge 2103
 
5354 serge 2104
	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2105
	     port_name(dp_to_dig_port(intel_dp)->port));
5060 serge 2106
 
3746 Serge 2107
	pp = ironlake_get_pp_control(intel_dp);
3031 serge 2108
	/* We need to switch off panel power _and_ force vdd, for otherwise some
2109
	 * panels get very unhappy and cease to work. */
5060 serge 2110
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2111
		EDP_BLC_ENABLE);
2327 Serge 2112
 
4560 Serge 2113
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3746 Serge 2114
 
5060 serge 2115
	intel_dp->want_panel_vdd = false;
2116
 
3746 Serge 2117
	I915_WRITE(pp_ctrl_reg, pp);
2118
	POSTING_READ(pp_ctrl_reg);
2119
 
5060 serge 2120
	intel_dp->last_power_cycle = jiffies;
2121
	wait_panel_off(intel_dp);
2122
 
2123
	/* We got a reference when we enabled the VDD. */
6084 serge 2124
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
5060 serge 2125
	intel_display_power_put(dev_priv, power_domain);
2330 Serge 2126
}
2327 Serge 2127
 
5354 serge 2128
/* Locked wrapper around edp_panel_off(); safe no-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2137
 
2138
/* Enable backlight in the panel power control. */
2139
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2140
{
3243 Serge 2141
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2142
	struct drm_device *dev = intel_dig_port->base.base.dev;
2330 Serge 2143
	struct drm_i915_private *dev_priv = dev->dev_private;
2144
	u32 pp;
6937 serge 2145
	i915_reg_t pp_ctrl_reg;
2327 Serge 2146
 
2330 Serge 2147
	/*
2148
	 * If we enable the backlight right away following a panel power
2149
	 * on, we may see slight flicker as the panel syncs with the eDP
2150
	 * link.  So delay a bit to make sure the image is solid before
2151
	 * allowing it to appear.
2152
	 */
5060 serge 2153
	wait_backlight_on(intel_dp);
5354 serge 2154
 
2155
	pps_lock(intel_dp);
2156
 
3746 Serge 2157
	pp = ironlake_get_pp_control(intel_dp);
2330 Serge 2158
	pp |= EDP_BLC_ENABLE;
3243 Serge 2159
 
4560 Serge 2160
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3746 Serge 2161
 
2162
	I915_WRITE(pp_ctrl_reg, pp);
2163
	POSTING_READ(pp_ctrl_reg);
5354 serge 2164
 
2165
	pps_unlock(intel_dp);
2330 Serge 2166
}
2327 Serge 2167
 
5354 serge 2168
/* Enable backlight PWM and backlight PP control. */
2169
void intel_edp_backlight_on(struct intel_dp *intel_dp)
2330 Serge 2170
{
5354 serge 2171
	if (!is_edp(intel_dp))
2172
		return;
2173
 
2174
	DRM_DEBUG_KMS("\n");
2175
 
2176
	intel_panel_enable_backlight(intel_dp->attached_connector);
2177
	_intel_edp_backlight_on(intel_dp);
2178
}
2179
 
2180
/* Disable backlight in the panel power control. */
2181
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2182
{
3243 Serge 2183
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2330 Serge 2184
	struct drm_i915_private *dev_priv = dev->dev_private;
2185
	u32 pp;
6937 serge 2186
	i915_reg_t pp_ctrl_reg;
2327 Serge 2187
 
2342 Serge 2188
	if (!is_edp(intel_dp))
2189
		return;
2190
 
5354 serge 2191
	pps_lock(intel_dp);
2192
 
3746 Serge 2193
	pp = ironlake_get_pp_control(intel_dp);
2330 Serge 2194
	pp &= ~EDP_BLC_ENABLE;
3746 Serge 2195
 
4560 Serge 2196
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3746 Serge 2197
 
2198
	I915_WRITE(pp_ctrl_reg, pp);
2199
	POSTING_READ(pp_ctrl_reg);
5354 serge 2200
 
2201
	pps_unlock(intel_dp);
2202
 
5060 serge 2203
	intel_dp->last_backlight_off = jiffies;
2204
	edp_wait_backlight_off(intel_dp);
5354 serge 2205
}
5060 serge 2206
 
5354 serge 2207
/* Disable backlight PP control and backlight PWM. */
2208
void intel_edp_backlight_off(struct intel_dp *intel_dp)
2209
{
2210
	if (!is_edp(intel_dp))
2211
		return;
2212
 
2213
	DRM_DEBUG_KMS("\n");
2214
 
2215
	_intel_edp_backlight_off(intel_dp);
5060 serge 2216
	intel_panel_disable_backlight(intel_dp->attached_connector);
2330 Serge 2217
}
2327 Serge 2218
 
5354 serge 2219
/*
2220
 * Hook for controlling the panel power control backlight through the bl_power
2221
 * sysfs attribute. Take care to handle multiple calls.
2222
 */
2223
static void intel_edp_backlight_power(struct intel_connector *connector,
2224
				      bool enable)
2225
{
2226
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2227
	bool is_enabled;
2228
 
2229
	pps_lock(intel_dp);
2230
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2231
	pps_unlock(intel_dp);
2232
 
2233
	if (is_enabled == enable)
2234
		return;
2235
 
2236
	DRM_DEBUG_KMS("panel power control backlight %s\n",
2237
		      enable ? "enable" : "disable");
2238
 
2239
	if (enable)
2240
		_intel_edp_backlight_on(intel_dp);
2241
	else
2242
		_intel_edp_backlight_off(intel_dp);
2243
}
2244
 
6937 serge 2245
/* Map a boolean state to its human-readable name for assert messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";

	return "off";
}
2249
 
2250
/* Warn (once) if the DP port enable bit does not match the expected state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->port),
			state_string(state), state_string(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2262
 
2263
/* Warn (once) if the eDP PLL enable bit does not match the expected state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			state_string(state), state_string(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2273
 
3031 serge 2274
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2330 Serge 2275
{
3243 Serge 2276
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
6937 serge 2277
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2278
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2327 Serge 2279
 
6937 serge 2280
	assert_pipe_disabled(dev_priv, crtc->pipe);
2281
	assert_dp_port_disabled(intel_dp);
2282
	assert_edp_pll_disabled(dev_priv);
3031 serge 2283
 
6937 serge 2284
	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2285
		      crtc->config->port_clock);
3031 serge 2286
 
6937 serge 2287
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2288
 
2289
	if (crtc->config->port_clock == 162000)
2290
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2291
	else
2292
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2293
 
2294
	I915_WRITE(DP_A, intel_dp->DP);
2295
	POSTING_READ(DP_A);
2296
	udelay(500);
2297
 
3031 serge 2298
	intel_dp->DP |= DP_PLL_ENABLE;
6937 serge 2299
 
3031 serge 2300
	I915_WRITE(DP_A, intel_dp->DP);
2330 Serge 2301
	POSTING_READ(DP_A);
2302
	udelay(200);
2303
}
2327 Serge 2304
 
3031 serge 2305
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2330 Serge 2306
{
3243 Serge 2307
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
6937 serge 2308
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2309
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2327 Serge 2310
 
6937 serge 2311
	assert_pipe_disabled(dev_priv, crtc->pipe);
2312
	assert_dp_port_disabled(intel_dp);
2313
	assert_edp_pll_enabled(dev_priv);
3031 serge 2314
 
6937 serge 2315
	DRM_DEBUG_KMS("disabling eDP PLL\n");
3031 serge 2316
 
6937 serge 2317
	intel_dp->DP &= ~DP_PLL_ENABLE;
2318
 
2319
	I915_WRITE(DP_A, intel_dp->DP);
2330 Serge 2320
	POSTING_READ(DP_A);
2321
	udelay(200);
2322
}
2327 Serge 2323
 
2330 Serge 2324
/* If the sink supports it, try to set the power state appropriately */
3243 Serge 2325
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2330 Serge 2326
{
2327
	int ret, i;
2327 Serge 2328
 
2330 Serge 2329
	/* Should have a valid DPCD by this point */
2330
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2331
		return;
2327 Serge 2332
 
2330 Serge 2333
	if (mode != DRM_MODE_DPMS_ON) {
5060 serge 2334
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
6084 serge 2335
					 DP_SET_POWER_D3);
2330 Serge 2336
	} else {
2337
		/*
2338
		 * When turning on, we need to retry for 1ms to give the sink
2339
		 * time to wake up.
2340
		 */
2341
		for (i = 0; i < 3; i++) {
5060 serge 2342
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
6084 serge 2343
						 DP_SET_POWER_D0);
2330 Serge 2344
			if (ret == 1)
2345
				break;
2346
			msleep(1);
2347
		}
2348
	}
5354 serge 2349
 
2350
	if (ret != 1)
2351
		DRM_DEBUG_KMS("failed to %s sink power state\n",
2352
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2330 Serge 2353
}
2327 Serge 2354
 
3031 serge 2355
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2356
				  enum pipe *pipe)
2330 Serge 2357
{
3031 serge 2358
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4104 Serge 2359
	enum port port = dp_to_dig_port(intel_dp)->port;
3031 serge 2360
	struct drm_device *dev = encoder->base.dev;
2361
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 2362
	enum intel_display_power_domain power_domain;
2363
	u32 tmp;
6937 serge 2364
	bool ret;
2327 Serge 2365
 
5060 serge 2366
	power_domain = intel_display_port_power_domain(encoder);
6937 serge 2367
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
5060 serge 2368
		return false;
2369
 
6937 serge 2370
	ret = false;
2371
 
5060 serge 2372
	tmp = I915_READ(intel_dp->output_reg);
2373
 
3031 serge 2374
	if (!(tmp & DP_PORT_EN))
6937 serge 2375
		goto out;
2342 Serge 2376
 
6084 serge 2377
	if (IS_GEN7(dev) && port == PORT_A) {
3031 serge 2378
		*pipe = PORT_TO_PIPE_CPT(tmp);
6084 serge 2379
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2380
		enum pipe p;
2327 Serge 2381
 
6084 serge 2382
		for_each_pipe(dev_priv, p) {
2383
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2384
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2385
				*pipe = p;
6937 serge 2386
				ret = true;
2387
 
2388
				goto out;
3031 serge 2389
			}
2390
		}
3243 Serge 2391
 
2392
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
6937 serge 2393
			      i915_mmio_reg_offset(intel_dp->output_reg));
6084 serge 2394
	} else if (IS_CHERRYVIEW(dev)) {
2395
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
2396
	} else {
2397
		*pipe = PORT_TO_PIPE(tmp);
3031 serge 2398
	}
2399
 
6937 serge 2400
	ret = true;
2401
 
2402
out:
2403
	intel_display_power_put(dev_priv, power_domain);
2404
 
2405
	return ret;
2330 Serge 2406
}
2327 Serge 2407
 
4104 Serge 2408
static void intel_dp_get_config(struct intel_encoder *encoder,
6084 serge 2409
				struct intel_crtc_state *pipe_config)
4104 Serge 2410
{
2411
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2412
	u32 tmp, flags = 0;
2413
	struct drm_device *dev = encoder->base.dev;
2414
	struct drm_i915_private *dev_priv = dev->dev_private;
2415
	enum port port = dp_to_dig_port(intel_dp)->port;
2416
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
4560 Serge 2417
	int dotclock;
4104 Serge 2418
 
5060 serge 2419
	tmp = I915_READ(intel_dp->output_reg);
2420
 
6084 serge 2421
	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2422
 
2423
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
2424
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2425
 
2426
		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
4104 Serge 2427
			flags |= DRM_MODE_FLAG_PHSYNC;
2428
		else
2429
			flags |= DRM_MODE_FLAG_NHSYNC;
2430
 
6084 serge 2431
		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
4104 Serge 2432
			flags |= DRM_MODE_FLAG_PVSYNC;
2433
		else
2434
			flags |= DRM_MODE_FLAG_NVSYNC;
2435
	} else {
6084 serge 2436
		if (tmp & DP_SYNC_HS_HIGH)
4104 Serge 2437
			flags |= DRM_MODE_FLAG_PHSYNC;
2438
		else
2439
			flags |= DRM_MODE_FLAG_NHSYNC;
2440
 
6084 serge 2441
		if (tmp & DP_SYNC_VS_HIGH)
4104 Serge 2442
			flags |= DRM_MODE_FLAG_PVSYNC;
2443
		else
2444
			flags |= DRM_MODE_FLAG_NVSYNC;
2445
	}
2446
 
6084 serge 2447
	pipe_config->base.adjusted_mode.flags |= flags;
4104 Serge 2448
 
5139 serge 2449
	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
6937 serge 2450
	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
5139 serge 2451
		pipe_config->limited_color_range = true;
2452
 
4560 Serge 2453
	pipe_config->has_dp_encoder = true;
2454
 
6084 serge 2455
	pipe_config->lane_count =
2456
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2457
 
4560 Serge 2458
	intel_dp_get_m_n(crtc, pipe_config);
2459
 
2460
	if (port == PORT_A) {
6937 serge 2461
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
4104 Serge 2462
			pipe_config->port_clock = 162000;
2463
		else
2464
			pipe_config->port_clock = 270000;
2465
	}
4280 Serge 2466
 
4560 Serge 2467
	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2468
					    &pipe_config->dp_m_n);
2469
 
2470
	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2471
		ironlake_check_encoder_dotclock(pipe_config, dotclock);
2472
 
6084 serge 2473
	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
4560 Serge 2474
 
4280 Serge 2475
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2476
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2477
		/*
2478
		 * This is a big fat ugly hack.
2479
		 *
2480
		 * Some machines in UEFI boot mode provide us a VBT that has 18
2481
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2482
		 * unknown we fail to light up. Yet the same BIOS boots up with
2483
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2484
		 * max, not what it tells us to use.
2485
		 *
2486
		 * Note: This will still be broken if the eDP panel is not lit
2487
		 * up by the BIOS, and thus we can't get the mode at module
2488
		 * load.
2489
		 */
2490
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2491
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2492
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2493
	}
4104 Serge 2494
}
2495
 
3031 serge 2496
static void intel_disable_dp(struct intel_encoder *encoder)
2330 Serge 2497
{
3031 serge 2498
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4104 Serge 2499
	struct drm_device *dev = encoder->base.dev;
5354 serge 2500
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2327 Serge 2501
 
6084 serge 2502
	if (crtc->config->has_audio)
5354 serge 2503
		intel_audio_codec_disable(encoder);
2504
 
6084 serge 2505
	if (HAS_PSR(dev) && !HAS_DDI(dev))
2506
		intel_psr_disable(intel_dp);
2507
 
3031 serge 2508
	/* Make sure the panel is off before trying to change the mode. But also
2509
	 * ensure that we have vdd while we switch off the panel. */
5060 serge 2510
	intel_edp_panel_vdd_on(intel_dp);
2511
	intel_edp_backlight_off(intel_dp);
4560 Serge 2512
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
5060 serge 2513
	intel_edp_panel_off(intel_dp);
2330 Serge 2514
 
5354 serge 2515
	/* disable the port before the pipe on g4x */
2516
	if (INTEL_INFO(dev)->gen < 5)
3031 serge 2517
		intel_dp_link_down(intel_dp);
2518
}
2330 Serge 2519
 
5354 serge 2520
static void ilk_post_disable_dp(struct intel_encoder *encoder)
3031 serge 2521
{
2522
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4104 Serge 2523
	enum port port = dp_to_dig_port(intel_dp)->port;
5060 serge 2524
 
2525
	intel_dp_link_down(intel_dp);
6937 serge 2526
 
2527
	/* Only ilk+ has port A */
5354 serge 2528
	if (port == PORT_A)
6084 serge 2529
		ironlake_edp_pll_off(intel_dp);
5060 serge 2530
}
2531
 
2532
static void vlv_post_disable_dp(struct intel_encoder *encoder)
2533
{
2534
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2535
 
2536
	intel_dp_link_down(intel_dp);
2537
}
2538
 
6084 serge 2539
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2540
				     bool reset)
5060 serge 2541
{
6084 serge 2542
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2543
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2544
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2545
	enum pipe pipe = crtc->pipe;
2546
	uint32_t val;
3031 serge 2547
 
6084 serge 2548
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2549
	if (reset)
2550
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2551
	else
2552
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2553
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
5060 serge 2554
 
6084 serge 2555
	if (crtc->config->lane_count > 2) {
2556
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2557
		if (reset)
2558
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2559
		else
2560
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2561
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2562
	}
5060 serge 2563
 
2564
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2565
	val |= CHV_PCS_REQ_SOFTRESET_EN;
6084 serge 2566
	if (reset)
2567
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
2568
	else
2569
		val |= DPIO_PCS_CLK_SOFT_RESET;
5060 serge 2570
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2571
 
6084 serge 2572
	if (crtc->config->lane_count > 2) {
2573
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2574
		val |= CHV_PCS_REQ_SOFTRESET_EN;
2575
		if (reset)
2576
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
2577
		else
2578
			val |= DPIO_PCS_CLK_SOFT_RESET;
2579
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2580
	}
2581
}
5060 serge 2582
 
6084 serge 2583
static void chv_post_disable_dp(struct intel_encoder *encoder)
2584
{
2585
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2586
	struct drm_device *dev = encoder->base.dev;
2587
	struct drm_i915_private *dev_priv = dev->dev_private;
5060 serge 2588
 
6084 serge 2589
	intel_dp_link_down(intel_dp);
5060 serge 2590
 
6084 serge 2591
	mutex_lock(&dev_priv->sb_lock);
2592
 
2593
	/* Assert data lane reset */
2594
	chv_data_lane_soft_reset(encoder, true);
2595
 
2596
	mutex_unlock(&dev_priv->sb_lock);
2330 Serge 2597
}
2598
 
5354 serge 2599
static void
2600
_intel_dp_set_link_train(struct intel_dp *intel_dp,
2601
			 uint32_t *DP,
2602
			 uint8_t dp_train_pat)
2603
{
2604
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2605
	struct drm_device *dev = intel_dig_port->base.base.dev;
2606
	struct drm_i915_private *dev_priv = dev->dev_private;
2607
	enum port port = intel_dig_port->port;
2608
 
2609
	if (HAS_DDI(dev)) {
2610
		uint32_t temp = I915_READ(DP_TP_CTL(port));
2611
 
2612
		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2613
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2614
		else
2615
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2616
 
2617
		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2618
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2619
		case DP_TRAINING_PATTERN_DISABLE:
2620
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2621
 
2622
			break;
2623
		case DP_TRAINING_PATTERN_1:
2624
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2625
			break;
2626
		case DP_TRAINING_PATTERN_2:
2627
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2628
			break;
2629
		case DP_TRAINING_PATTERN_3:
2630
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2631
			break;
2632
		}
2633
		I915_WRITE(DP_TP_CTL(port), temp);
2634
 
6084 serge 2635
	} else if ((IS_GEN7(dev) && port == PORT_A) ||
2636
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
5354 serge 2637
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2638
 
2639
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2640
		case DP_TRAINING_PATTERN_DISABLE:
2641
			*DP |= DP_LINK_TRAIN_OFF_CPT;
2642
			break;
2643
		case DP_TRAINING_PATTERN_1:
2644
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2645
			break;
2646
		case DP_TRAINING_PATTERN_2:
2647
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2648
			break;
2649
		case DP_TRAINING_PATTERN_3:
2650
			DRM_ERROR("DP training pattern 3 not supported\n");
2651
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2652
			break;
2653
		}
2654
 
2655
	} else {
2656
		if (IS_CHERRYVIEW(dev))
2657
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
2658
		else
2659
			*DP &= ~DP_LINK_TRAIN_MASK;
2660
 
2661
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2662
		case DP_TRAINING_PATTERN_DISABLE:
2663
			*DP |= DP_LINK_TRAIN_OFF;
2664
			break;
2665
		case DP_TRAINING_PATTERN_1:
2666
			*DP |= DP_LINK_TRAIN_PAT_1;
2667
			break;
2668
		case DP_TRAINING_PATTERN_2:
2669
			*DP |= DP_LINK_TRAIN_PAT_2;
2670
			break;
2671
		case DP_TRAINING_PATTERN_3:
2672
			if (IS_CHERRYVIEW(dev)) {
2673
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
2674
			} else {
2675
				DRM_ERROR("DP training pattern 3 not supported\n");
2676
				*DP |= DP_LINK_TRAIN_PAT_2;
2677
			}
2678
			break;
2679
		}
2680
	}
2681
}
2682
 
2683
static void intel_dp_enable_port(struct intel_dp *intel_dp)
2684
{
2685
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2686
	struct drm_i915_private *dev_priv = dev->dev_private;
6937 serge 2687
	struct intel_crtc *crtc =
2688
		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
5354 serge 2689
 
2690
	/* enable with pattern 1 (as per spec) */
2691
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2692
				 DP_TRAINING_PATTERN_1);
2693
 
2694
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2695
	POSTING_READ(intel_dp->output_reg);
2696
 
2697
	/*
2698
	 * Magic for VLV/CHV. We _must_ first set up the register
2699
	 * without actually enabling the port, and then do another
2700
	 * write to enable the port. Otherwise link training will
2701
	 * fail when the power sequencer is freshly used for this port.
2702
	 */
2703
	intel_dp->DP |= DP_PORT_EN;
6937 serge 2704
	if (crtc->config->has_audio)
2705
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
5354 serge 2706
 
2707
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2708
	POSTING_READ(intel_dp->output_reg);
2709
}
2710
 
3031 serge 2711
static void intel_enable_dp(struct intel_encoder *encoder)
2330 Serge 2712
{
3031 serge 2713
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2714
	struct drm_device *dev = encoder->base.dev;
2330 Serge 2715
	struct drm_i915_private *dev_priv = dev->dev_private;
5354 serge 2716
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2330 Serge 2717
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
6937 serge 2718
	enum port port = dp_to_dig_port(intel_dp)->port;
2719
	enum pipe pipe = crtc->pipe;
2330 Serge 2720
 
3031 serge 2721
	if (WARN_ON(dp_reg & DP_PORT_EN))
2722
		return;
2342 Serge 2723
 
5354 serge 2724
	pps_lock(intel_dp);
2725
 
6937 serge 2726
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5354 serge 2727
		vlv_init_panel_power_sequencer(intel_dp);
2728
 
6937 serge 2729
	/*
2730
	 * We get an occasional spurious underrun between the port
2731
	 * enable and vdd enable, when enabling port A eDP.
2732
	 *
2733
	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2734
	 */
2735
	if (port == PORT_A)
2736
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2737
 
5354 serge 2738
	intel_dp_enable_port(intel_dp);
2739
 
6937 serge 2740
	if (port == PORT_A && IS_GEN5(dev_priv)) {
2741
		/*
2742
		 * Underrun reporting for the other pipe was disabled in
2743
		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2744
		 * enabled, so it's now safe to re-enable underrun reporting.
2745
		 */
2746
		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2747
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2748
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2749
	}
2750
 
5354 serge 2751
	edp_panel_vdd_on(intel_dp);
2752
	edp_panel_on(intel_dp);
2753
	edp_panel_vdd_off(intel_dp, true);
2754
 
6937 serge 2755
	if (port == PORT_A)
2756
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2757
 
5354 serge 2758
	pps_unlock(intel_dp);
2759
 
6937 serge 2760
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
6084 serge 2761
		unsigned int lane_mask = 0x0;
5354 serge 2762
 
6084 serge 2763
		if (IS_CHERRYVIEW(dev))
2764
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2765
 
2766
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2767
				    lane_mask);
2768
	}
2769
 
3031 serge 2770
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
6084 serge 2771
	intel_dp_start_link_train(intel_dp);
3746 Serge 2772
	intel_dp_stop_link_train(intel_dp);
5354 serge 2773
 
6084 serge 2774
	if (crtc->config->has_audio) {
5354 serge 2775
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
6937 serge 2776
				 pipe_name(pipe));
5354 serge 2777
		intel_audio_codec_enable(encoder);
2778
	}
4560 Serge 2779
}
2780
 
2781
static void g4x_enable_dp(struct intel_encoder *encoder)
2782
{
2783
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2784
 
2785
	intel_enable_dp(encoder);
5060 serge 2786
	intel_edp_backlight_on(intel_dp);
2330 Serge 2787
}
2788
 
4104 Serge 2789
static void vlv_enable_dp(struct intel_encoder *encoder)
2790
{
4560 Serge 2791
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2792
 
5060 serge 2793
	intel_edp_backlight_on(intel_dp);
6084 serge 2794
	intel_psr_enable(intel_dp);
4104 Serge 2795
}
2796
 
4560 Serge 2797
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
3031 serge 2798
{
6937 serge 2799
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3031 serge 2800
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
6937 serge 2801
	enum port port = dp_to_dig_port(intel_dp)->port;
2802
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3031 serge 2803
 
5060 serge 2804
	intel_dp_prepare(encoder);
2805
 
6937 serge 2806
	if (port == PORT_A && IS_GEN5(dev_priv)) {
2807
		/*
2808
		 * We get FIFO underruns on the other pipe when
2809
		 * enabling the CPU eDP PLL, and when enabling CPU
2810
		 * eDP port. We could potentially avoid the PLL
2811
		 * underrun with a vblank wait just prior to enabling
2812
		 * the PLL, but that doesn't appear to help the port
2813
		 * enable case. Just sweep it all under the rug.
2814
		 */
2815
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2816
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2817
	}
2818
 
5060 serge 2819
	/* Only ilk+ has port A */
6937 serge 2820
	if (port == PORT_A)
3031 serge 2821
		ironlake_edp_pll_on(intel_dp);
5060 serge 2822
	}
3031 serge 2823
 
5354 serge 2824
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2825
{
2826
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2827
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2828
	enum pipe pipe = intel_dp->pps_pipe;
6937 serge 2829
	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5354 serge 2830
 
2831
	edp_panel_vdd_off_sync(intel_dp);
2832
 
2833
	/*
2834
	 * VLV seems to get confused when multiple power seqeuencers
2835
	 * have the same port selected (even if only one has power/vdd
2836
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
2837
	 * CHV on the other hand doesn't seem to mind having the same port
2838
	 * selected in multiple power seqeuencers, but let's clear the
2839
	 * port select always when logically disconnecting a power sequencer
2840
	 * from a port.
2841
	 */
2842
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2843
		      pipe_name(pipe), port_name(intel_dig_port->port));
2844
	I915_WRITE(pp_on_reg, 0);
2845
	POSTING_READ(pp_on_reg);
2846
 
2847
	intel_dp->pps_pipe = INVALID_PIPE;
2848
}
2849
 
2850
static void vlv_steal_power_sequencer(struct drm_device *dev,
2851
				      enum pipe pipe)
2852
{
2853
	struct drm_i915_private *dev_priv = dev->dev_private;
2854
	struct intel_encoder *encoder;
2855
 
2856
	lockdep_assert_held(&dev_priv->pps_mutex);
2857
 
2858
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2859
		return;
2860
 
6937 serge 2861
	for_each_intel_encoder(dev, encoder) {
5354 serge 2862
		struct intel_dp *intel_dp;
2863
		enum port port;
2864
 
2865
		if (encoder->type != INTEL_OUTPUT_EDP)
2866
			continue;
2867
 
2868
		intel_dp = enc_to_intel_dp(&encoder->base);
2869
		port = dp_to_dig_port(intel_dp)->port;
2870
 
2871
		if (intel_dp->pps_pipe != pipe)
2872
			continue;
2873
 
2874
		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2875
			      pipe_name(pipe), port_name(port));
2876
 
6084 serge 2877
		WARN(encoder->base.crtc,
5354 serge 2878
		     "stealing pipe %c power sequencer from active eDP port %c\n",
2879
		     pipe_name(pipe), port_name(port));
2880
 
2881
		/* make sure vdd is off before we steal it */
2882
		vlv_detach_power_sequencer(intel_dp);
2883
	}
2884
}
2885
 
2886
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2887
{
2888
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2889
	struct intel_encoder *encoder = &intel_dig_port->base;
2890
	struct drm_device *dev = encoder->base.dev;
2891
	struct drm_i915_private *dev_priv = dev->dev_private;
2892
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2893
 
2894
	lockdep_assert_held(&dev_priv->pps_mutex);
2895
 
2896
	if (!is_edp(intel_dp))
2897
		return;
2898
 
2899
	if (intel_dp->pps_pipe == crtc->pipe)
2900
		return;
2901
 
2902
	/*
2903
	 * If another power sequencer was being used on this
2904
	 * port previously make sure to turn off vdd there while
2905
	 * we still have control of it.
2906
	 */
2907
	if (intel_dp->pps_pipe != INVALID_PIPE)
2908
		vlv_detach_power_sequencer(intel_dp);
2909
 
2910
	/*
2911
	 * We may be stealing the power
2912
	 * sequencer from another port.
2913
	 */
2914
	vlv_steal_power_sequencer(dev, crtc->pipe);
2915
 
2916
	/* now it's all ours */
2917
	intel_dp->pps_pipe = crtc->pipe;
2918
 
2919
	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2920
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2921
 
2922
	/* init power sequencer on this pipe and port */
2923
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
2924
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2925
}
2926
 
4104 Serge 2927
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2928
{
2929
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2930
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2931
	struct drm_device *dev = encoder->base.dev;
2932
	struct drm_i915_private *dev_priv = dev->dev_private;
2933
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
4560 Serge 2934
	enum dpio_channel port = vlv_dport_to_channel(dport);
6084 serge 2935
	int pipe = intel_crtc->pipe;
2936
	u32 val;
4104 Serge 2937
 
6084 serge 2938
	mutex_lock(&dev_priv->sb_lock);
4104 Serge 2939
 
4560 Serge 2940
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
6084 serge 2941
	val = 0;
2942
	if (pipe)
2943
		val |= (1<<21);
2944
	else
2945
		val &= ~(1<<21);
2946
	val |= 0x001000c4;
4560 Serge 2947
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2948
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2949
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
4104 Serge 2950
 
6084 serge 2951
	mutex_unlock(&dev_priv->sb_lock);
4104 Serge 2952
 
2953
	intel_enable_dp(encoder);
4539 Serge 2954
}
4104 Serge 2955
 
4560 Serge 2956
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
4104 Serge 2957
{
2958
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2959
	struct drm_device *dev = encoder->base.dev;
2960
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 2961
	struct intel_crtc *intel_crtc =
2962
		to_intel_crtc(encoder->base.crtc);
2963
	enum dpio_channel port = vlv_dport_to_channel(dport);
2964
	int pipe = intel_crtc->pipe;
4104 Serge 2965
 
5060 serge 2966
	intel_dp_prepare(encoder);
2967
 
4104 Serge 2968
	/* Program Tx lane resets to default */
6084 serge 2969
	mutex_lock(&dev_priv->sb_lock);
4560 Serge 2970
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
4104 Serge 2971
			 DPIO_PCS_TX_LANE2_RESET |
2972
			 DPIO_PCS_TX_LANE1_RESET);
4560 Serge 2973
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
4104 Serge 2974
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2975
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2976
			 (1<
2977
				 DPIO_PCS_CLK_SOFT_RESET);
2978
 
2979
	/* Fix up inter-pair skew failure */
4560 Serge 2980
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2981
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2982
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
6084 serge 2983
	mutex_unlock(&dev_priv->sb_lock);
4104 Serge 2984
}
2985
 
5060 serge 2986
static void chv_pre_enable_dp(struct intel_encoder *encoder)
2987
{
2988
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2989
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2990
	struct drm_device *dev = encoder->base.dev;
2991
	struct drm_i915_private *dev_priv = dev->dev_private;
2992
	struct intel_crtc *intel_crtc =
2993
		to_intel_crtc(encoder->base.crtc);
2994
	enum dpio_channel ch = vlv_dport_to_channel(dport);
2995
	int pipe = intel_crtc->pipe;
6084 serge 2996
	int data, i, stagger;
5060 serge 2997
	u32 val;
2998
 
6084 serge 2999
	mutex_lock(&dev_priv->sb_lock);
5060 serge 3000
 
5354 serge 3001
	/* allow hardware to manage TX FIFO reset source */
3002
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3003
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3004
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3005
 
6084 serge 3006
	if (intel_crtc->config->lane_count > 2) {
3007
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3008
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3009
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3010
	}
5354 serge 3011
 
5060 serge 3012
	/* Program Tx lane latency optimal setting*/
6084 serge 3013
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
5060 serge 3014
		/* Set the upar bit */
6084 serge 3015
		if (intel_crtc->config->lane_count == 1)
3016
			data = 0x0;
3017
		else
3018
			data = (i == 1) ? 0x0 : 0x1;
5060 serge 3019
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3020
				data << DPIO_UPAR_SHIFT);
3021
	}
3022
 
3023
	/* Data lane stagger programming */
6084 serge 3024
	if (intel_crtc->config->port_clock > 270000)
3025
		stagger = 0x18;
3026
	else if (intel_crtc->config->port_clock > 135000)
3027
		stagger = 0xd;
3028
	else if (intel_crtc->config->port_clock > 67500)
3029
		stagger = 0x7;
3030
	else if (intel_crtc->config->port_clock > 33750)
3031
		stagger = 0x4;
3032
	else
3033
		stagger = 0x2;
5060 serge 3034
 
6084 serge 3035
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3036
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
3037
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
5060 serge 3038
 
6084 serge 3039
	if (intel_crtc->config->lane_count > 2) {
3040
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3041
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
3042
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3043
	}
3044
 
3045
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3046
		       DPIO_LANESTAGGER_STRAP(stagger) |
3047
		       DPIO_LANESTAGGER_STRAP_OVRD |
3048
		       DPIO_TX1_STAGGER_MASK(0x1f) |
3049
		       DPIO_TX1_STAGGER_MULT(6) |
3050
		       DPIO_TX2_STAGGER_MULT(0));
3051
 
3052
	if (intel_crtc->config->lane_count > 2) {
3053
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3054
			       DPIO_LANESTAGGER_STRAP(stagger) |
3055
			       DPIO_LANESTAGGER_STRAP_OVRD |
3056
			       DPIO_TX1_STAGGER_MASK(0x1f) |
3057
			       DPIO_TX1_STAGGER_MULT(7) |
3058
			       DPIO_TX2_STAGGER_MULT(5));
3059
	}
3060
 
3061
	/* Deassert data lane reset */
3062
	chv_data_lane_soft_reset(encoder, false);
3063
 
3064
	mutex_unlock(&dev_priv->sb_lock);
3065
 
5060 serge 3066
	intel_enable_dp(encoder);
6084 serge 3067
 
3068
	/* Second common lane will stay alive on its own now */
3069
	if (dport->release_cl2_override) {
3070
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3071
		dport->release_cl2_override = false;
3072
	}
5060 serge 3073
}
3074
 
3075
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3076
{
3077
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3078
	struct drm_device *dev = encoder->base.dev;
3079
	struct drm_i915_private *dev_priv = dev->dev_private;
3080
	struct intel_crtc *intel_crtc =
3081
		to_intel_crtc(encoder->base.crtc);
3082
	enum dpio_channel ch = vlv_dport_to_channel(dport);
3083
	enum pipe pipe = intel_crtc->pipe;
6084 serge 3084
	unsigned int lane_mask =
3085
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
5060 serge 3086
	u32 val;
3087
 
5354 serge 3088
	intel_dp_prepare(encoder);
3089
 
6084 serge 3090
	/*
3091
	 * Must trick the second common lane into life.
3092
	 * Otherwise we can't even access the PLL.
3093
	 */
3094
	if (ch == DPIO_CH0 && pipe == PIPE_B)
3095
		dport->release_cl2_override =
3096
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
5060 serge 3097
 
6084 serge 3098
	chv_phy_powergate_lanes(encoder, true, lane_mask);
3099
 
3100
	mutex_lock(&dev_priv->sb_lock);
3101
 
3102
	/* Assert data lane reset */
3103
	chv_data_lane_soft_reset(encoder, true);
3104
 
5060 serge 3105
	/* program left/right clock distribution */
3106
	if (pipe != PIPE_B) {
3107
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3108
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3109
		if (ch == DPIO_CH0)
3110
			val |= CHV_BUFLEFTENA1_FORCE;
3111
		if (ch == DPIO_CH1)
3112
			val |= CHV_BUFRIGHTENA1_FORCE;
3113
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3114
	} else {
3115
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3116
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3117
		if (ch == DPIO_CH0)
3118
			val |= CHV_BUFLEFTENA2_FORCE;
3119
		if (ch == DPIO_CH1)
3120
			val |= CHV_BUFRIGHTENA2_FORCE;
3121
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3122
	}
3123
 
3124
	/* program clock channel usage */
3125
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3126
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3127
	if (pipe != PIPE_B)
3128
		val &= ~CHV_PCS_USEDCLKCHANNEL;
3129
	else
3130
		val |= CHV_PCS_USEDCLKCHANNEL;
3131
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3132
 
6084 serge 3133
	if (intel_crtc->config->lane_count > 2) {
3134
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3135
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3136
		if (pipe != PIPE_B)
3137
			val &= ~CHV_PCS_USEDCLKCHANNEL;
3138
		else
3139
			val |= CHV_PCS_USEDCLKCHANNEL;
3140
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3141
	}
5060 serge 3142
 
3143
	/*
3144
	 * This a a bit weird since generally CL
3145
	 * matches the pipe, but here we need to
3146
	 * pick the CL based on the port.
3147
	 */
3148
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3149
	if (pipe != PIPE_B)
3150
		val &= ~CHV_CMN_USEDCLKCHANNEL;
3151
	else
3152
		val |= CHV_CMN_USEDCLKCHANNEL;
3153
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3154
 
6084 serge 3155
	mutex_unlock(&dev_priv->sb_lock);
5060 serge 3156
}
3157
 
6084 serge 3158
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3159
{
3160
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3161
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3162
	u32 val;
3163
 
3164
	mutex_lock(&dev_priv->sb_lock);
3165
 
3166
	/* disable left/right clock distribution */
3167
	if (pipe != PIPE_B) {
3168
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3169
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3170
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3171
	} else {
3172
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3173
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3174
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3175
	}
3176
 
3177
	mutex_unlock(&dev_priv->sb_lock);
3178
 
3179
	/*
3180
	 * Leave the power down bit cleared for at least one
3181
	 * lane so that chv_powergate_phy_ch() will power
3182
	 * on something when the channel is otherwise unused.
3183
	 * When the port is off and the override is removed
3184
	 * the lanes power down anyway, so otherwise it doesn't
3185
	 * really matter what the state of power down bits is
3186
	 * after this.
3187
	 */
3188
	chv_phy_powergate_lanes(encoder, false, 0x0);
3189
}
3190
 
2330 Serge 3191
/*
3192
 * Native read with retry for link status and receiver capability reads for
3193
 * cases where the sink may still be asleep.
5060 serge 3194
 *
3195
 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3196
 * supposed to retry 3 times per the spec.
2330 Serge 3197
 */
5060 serge 3198
static ssize_t
3199
intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3200
			void *buffer, size_t size)
2330 Serge 3201
{
5060 serge 3202
	ssize_t ret;
3203
	int i;
2330 Serge 3204
 
5354 serge 3205
	/*
3206
	 * Sometime we just get the same incorrect byte repeated
3207
	 * over the entire buffer. Doing just one throw away read
3208
	 * initially seems to "solve" it.
3209
	 */
3210
	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3211
 
2330 Serge 3212
	for (i = 0; i < 3; i++) {
5060 serge 3213
		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3214
		if (ret == size)
3215
			return ret;
2330 Serge 3216
		msleep(1);
3217
	}
3218
 
5060 serge 3219
	return ret;
2330 Serge 3220
}
3221
 
3222
/*
3223
 * Fetch AUX CH registers 0x202 - 0x207 which contain
3224
 * link status information
3225
 */
6937 serge 3226
bool
2342 Serge 3227
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2330 Serge 3228
{
5060 serge 3229
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
6084 serge 3230
				       DP_LANE0_1_STATUS,
3231
				       link_status,
5060 serge 3232
				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2330 Serge 3233
}
3234
 
5060 serge 3235
/* These are source-specific values. */
6937 serge 3236
uint8_t
2342 Serge 3237
intel_dp_voltage_max(struct intel_dp *intel_dp)
2330 Serge 3238
{
3243 Serge 3239
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
6084 serge 3240
	struct drm_i915_private *dev_priv = dev->dev_private;
4104 Serge 3241
	enum port port = dp_to_dig_port(intel_dp)->port;
2342 Serge 3242
 
6084 serge 3243
	if (IS_BROXTON(dev))
3244
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3245
	else if (INTEL_INFO(dev)->gen >= 9) {
3246
		if (dev_priv->edp_low_vswing && port == PORT_A)
3247
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5354 serge 3248
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
6937 serge 3249
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5354 serge 3250
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
4104 Serge 3251
	else if (IS_GEN7(dev) && port == PORT_A)
5354 serge 3252
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
4104 Serge 3253
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
5354 serge 3254
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2342 Serge 3255
	else
5354 serge 3256
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2342 Serge 3257
}
3258
 
6937 serge 3259
uint8_t
2342 Serge 3260
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3261
{
3243 Serge 3262
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4104 Serge 3263
	enum port port = dp_to_dig_port(intel_dp)->port;
2342 Serge 3264
 
5354 serge 3265
	if (INTEL_INFO(dev)->gen >= 9) {
2342 Serge 3266
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3267
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3268
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3269
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3270
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3271
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3272
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
6084 serge 3273
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3274
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3243 Serge 3275
		default:
5354 serge 3276
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3243 Serge 3277
		}
5354 serge 3278
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3279
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3280
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3281
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3282
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3283
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3284
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3285
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3286
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3287
		default:
3288
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
3289
		}
6937 serge 3290
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3243 Serge 3291
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3292
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3293
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
3294
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3295
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3296
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3297
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3298
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
4104 Serge 3299
		default:
5354 serge 3300
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
4104 Serge 3301
		}
3302
	} else if (IS_GEN7(dev) && port == PORT_A) {
3303
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3304
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3305
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3306
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3307
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3308
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
2342 Serge 3309
		default:
5354 serge 3310
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
2342 Serge 3311
		}
3312
	} else {
6084 serge 3313
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3314
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3315
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3316
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3317
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
3318
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3319
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
3320
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
6084 serge 3321
		default:
5354 serge 3322
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
6084 serge 3323
		}
2330 Serge 3324
	}
3325
}
3326
 
6084 serge 3327
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
4104 Serge 3328
{
3329
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3330
	struct drm_i915_private *dev_priv = dev->dev_private;
3331
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
4560 Serge 3332
	struct intel_crtc *intel_crtc =
3333
		to_intel_crtc(dport->base.base.crtc);
4104 Serge 3334
	unsigned long demph_reg_value, preemph_reg_value,
3335
		uniqtranscale_reg_value;
3336
	uint8_t train_set = intel_dp->train_set[0];
4560 Serge 3337
	enum dpio_channel port = vlv_dport_to_channel(dport);
3338
	int pipe = intel_crtc->pipe;
4104 Serge 3339
 
3340
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
5354 serge 3341
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
4104 Serge 3342
		preemph_reg_value = 0x0004000;
3343
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3344
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4104 Serge 3345
			demph_reg_value = 0x2B405555;
3346
			uniqtranscale_reg_value = 0x552AB83A;
3347
			break;
5354 serge 3348
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4104 Serge 3349
			demph_reg_value = 0x2B404040;
3350
			uniqtranscale_reg_value = 0x5548B83A;
3351
			break;
5354 serge 3352
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4104 Serge 3353
			demph_reg_value = 0x2B245555;
3354
			uniqtranscale_reg_value = 0x5560B83A;
3355
			break;
5354 serge 3356
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
4104 Serge 3357
			demph_reg_value = 0x2B405555;
3358
			uniqtranscale_reg_value = 0x5598DA3A;
3359
			break;
3360
		default:
3361
			return 0;
3362
		}
3363
		break;
5354 serge 3364
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
4104 Serge 3365
		preemph_reg_value = 0x0002000;
3366
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3367
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4104 Serge 3368
			demph_reg_value = 0x2B404040;
3369
			uniqtranscale_reg_value = 0x5552B83A;
3370
			break;
5354 serge 3371
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4104 Serge 3372
			demph_reg_value = 0x2B404848;
3373
			uniqtranscale_reg_value = 0x5580B83A;
3374
			break;
5354 serge 3375
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4104 Serge 3376
			demph_reg_value = 0x2B404040;
3377
			uniqtranscale_reg_value = 0x55ADDA3A;
3378
			break;
3379
		default:
3380
			return 0;
3381
		}
3382
		break;
5354 serge 3383
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
4104 Serge 3384
		preemph_reg_value = 0x0000000;
3385
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3386
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4104 Serge 3387
			demph_reg_value = 0x2B305555;
3388
			uniqtranscale_reg_value = 0x5570B83A;
3389
			break;
5354 serge 3390
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4104 Serge 3391
			demph_reg_value = 0x2B2B4040;
3392
			uniqtranscale_reg_value = 0x55ADDA3A;
3393
			break;
3394
		default:
3395
			return 0;
3396
		}
3397
		break;
5354 serge 3398
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
4104 Serge 3399
		preemph_reg_value = 0x0006000;
3400
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3401
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4104 Serge 3402
			demph_reg_value = 0x1B405555;
3403
			uniqtranscale_reg_value = 0x55ADDA3A;
3404
			break;
3405
		default:
3406
			return 0;
3407
		}
3408
		break;
3409
	default:
3410
		return 0;
3411
	}
3412
 
6084 serge 3413
	mutex_lock(&dev_priv->sb_lock);
4560 Serge 3414
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3415
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3416
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
4104 Serge 3417
			 uniqtranscale_reg_value);
4560 Serge 3418
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3419
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3420
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3421
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
6084 serge 3422
	mutex_unlock(&dev_priv->sb_lock);
4104 Serge 3423
 
3424
	return 0;
3425
}
3426
 
6084 serge 3427
/*
 * The unique transition scale is only needed for the maximum-swing,
 * zero-pre-emphasis combination.
 */
static bool chv_need_uniq_trans_scale(uint8_t train_set)
{
	uint8_t preemph = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
	uint8_t vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;

	return preemph == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
	       vswing == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
3432
 
3433
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3434
{
5060 serge 3435
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3436
	struct drm_i915_private *dev_priv = dev->dev_private;
3437
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3438
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3439
	u32 deemph_reg_value, margin_reg_value, val;
3440
	uint8_t train_set = intel_dp->train_set[0];
3441
	enum dpio_channel ch = vlv_dport_to_channel(dport);
3442
	enum pipe pipe = intel_crtc->pipe;
3443
	int i;
3444
 
3445
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
5354 serge 3446
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
5060 serge 3447
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3448
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
5060 serge 3449
			deemph_reg_value = 128;
3450
			margin_reg_value = 52;
3451
			break;
5354 serge 3452
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
5060 serge 3453
			deemph_reg_value = 128;
3454
			margin_reg_value = 77;
3455
			break;
5354 serge 3456
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
5060 serge 3457
			deemph_reg_value = 128;
3458
			margin_reg_value = 102;
3459
			break;
5354 serge 3460
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
5060 serge 3461
			deemph_reg_value = 128;
3462
			margin_reg_value = 154;
3463
			/* FIXME extra to set for 1200 */
3464
			break;
3465
		default:
3466
			return 0;
3467
		}
3468
		break;
5354 serge 3469
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
5060 serge 3470
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3471
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
5060 serge 3472
			deemph_reg_value = 85;
3473
			margin_reg_value = 78;
3474
			break;
5354 serge 3475
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
5060 serge 3476
			deemph_reg_value = 85;
3477
			margin_reg_value = 116;
3478
			break;
5354 serge 3479
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
5060 serge 3480
			deemph_reg_value = 85;
3481
			margin_reg_value = 154;
3482
			break;
3483
		default:
3484
			return 0;
3485
		}
3486
		break;
5354 serge 3487
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
5060 serge 3488
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3489
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
5060 serge 3490
			deemph_reg_value = 64;
3491
			margin_reg_value = 104;
3492
			break;
5354 serge 3493
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
5060 serge 3494
			deemph_reg_value = 64;
3495
			margin_reg_value = 154;
3496
			break;
3497
		default:
3498
			return 0;
3499
		}
3500
		break;
5354 serge 3501
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
5060 serge 3502
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3503
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
5060 serge 3504
			deemph_reg_value = 43;
3505
			margin_reg_value = 154;
3506
			break;
3507
		default:
3508
			return 0;
3509
		}
3510
		break;
3511
	default:
3512
		return 0;
3513
	}
3514
 
6084 serge 3515
	mutex_lock(&dev_priv->sb_lock);
5060 serge 3516
 
3517
	/* Clear calc init */
3518
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3519
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
5354 serge 3520
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3521
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
5060 serge 3522
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3523
 
6084 serge 3524
	if (intel_crtc->config->lane_count > 2) {
3525
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3526
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3527
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3528
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3529
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3530
	}
5060 serge 3531
 
5354 serge 3532
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3533
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3534
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3535
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3536
 
6084 serge 3537
	if (intel_crtc->config->lane_count > 2) {
3538
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3539
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3540
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3541
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3542
	}
5354 serge 3543
 
5060 serge 3544
	/* Program swing deemph */
6084 serge 3545
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
5060 serge 3546
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3547
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3548
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3549
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3550
	}
3551
 
3552
	/* Program swing margin */
6084 serge 3553
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
5060 serge 3554
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
6084 serge 3555
 
5354 serge 3556
		val &= ~DPIO_SWING_MARGIN000_MASK;
3557
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
6084 serge 3558
 
3559
		/*
3560
		 * Supposedly this value shouldn't matter when unique transition
3561
		 * scale is disabled, but in fact it does matter. Let's just
3562
		 * always program the same value and hope it's OK.
3563
		 */
3564
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3565
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3566
 
5060 serge 3567
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3568
	}
3569
 
6084 serge 3570
	/*
3571
	 * The document said it needs to set bit 27 for ch0 and bit 26
3572
	 * for ch1. Might be a typo in the doc.
3573
	 * For now, for this unique transition scale selection, set bit
3574
	 * 27 for ch0 and ch1.
3575
	 */
3576
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
5060 serge 3577
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
6084 serge 3578
		if (chv_need_uniq_trans_scale(train_set))
3579
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3580
		else
3581
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
5060 serge 3582
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3583
	}
3584
 
3585
	/* Start swing calculation */
3586
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3587
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3588
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3589
 
6084 serge 3590
	if (intel_crtc->config->lane_count > 2) {
3591
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3592
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3593
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3594
	}
5060 serge 3595
 
6084 serge 3596
	mutex_unlock(&dev_priv->sb_lock);
5060 serge 3597
 
3598
	return 0;
3599
}
3600
 
2330 Serge 3601
static uint32_t
6084 serge 3602
gen4_signal_levels(uint8_t train_set)
2330 Serge 3603
{
3604
	uint32_t	signal_levels = 0;
3605
 
3606
	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
5354 serge 3607
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2330 Serge 3608
	default:
3609
		signal_levels |= DP_VOLTAGE_0_4;
3610
		break;
5354 serge 3611
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2330 Serge 3612
		signal_levels |= DP_VOLTAGE_0_6;
3613
		break;
5354 serge 3614
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2330 Serge 3615
		signal_levels |= DP_VOLTAGE_0_8;
3616
		break;
5354 serge 3617
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2330 Serge 3618
		signal_levels |= DP_VOLTAGE_1_2;
3619
		break;
3620
	}
3621
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
5354 serge 3622
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
2330 Serge 3623
	default:
3624
		signal_levels |= DP_PRE_EMPHASIS_0;
3625
		break;
5354 serge 3626
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
2330 Serge 3627
		signal_levels |= DP_PRE_EMPHASIS_3_5;
3628
		break;
5354 serge 3629
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
2330 Serge 3630
		signal_levels |= DP_PRE_EMPHASIS_6;
3631
		break;
5354 serge 3632
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
2330 Serge 3633
		signal_levels |= DP_PRE_EMPHASIS_9_5;
3634
		break;
3635
	}
3636
	return signal_levels;
3637
}
3638
 
3639
/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	/*
	 * SNB eDP (port A) encodes swing+pre-emphasis as a single register
	 * field; only specific combinations exist, so map the combined
	 * training-set bits through a lookup switch.
	 */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		/* Unsupported combo: warn and fall back to the lowest levels. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3666
 
2342 Serge 3667
/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	/*
	 * IVB eDP (port A) supports only a subset of swing/pre-emphasis
	 * combinations; translate the combined training-set bits to the
	 * matching register encoding.
	 */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/* Unsupported combo: warn and use the 500mV/0dB fallback. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3697
 
6937 serge 3698
void
3699
intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3480 Serge 3700
{
3701
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4104 Serge 3702
	enum port port = intel_dig_port->port;
3480 Serge 3703
	struct drm_device *dev = intel_dig_port->base.base.dev;
6937 serge 3704
	struct drm_i915_private *dev_priv = to_i915(dev);
6084 serge 3705
	uint32_t signal_levels, mask = 0;
3480 Serge 3706
	uint8_t train_set = intel_dp->train_set[0];
3707
 
6084 serge 3708
	if (HAS_DDI(dev)) {
3709
		signal_levels = ddi_signal_levels(intel_dp);
3710
 
3711
		if (IS_BROXTON(dev))
3712
			signal_levels = 0;
3713
		else
3714
			mask = DDI_BUF_EMP_MASK;
5060 serge 3715
	} else if (IS_CHERRYVIEW(dev)) {
6084 serge 3716
		signal_levels = chv_signal_levels(intel_dp);
4104 Serge 3717
	} else if (IS_VALLEYVIEW(dev)) {
6084 serge 3718
		signal_levels = vlv_signal_levels(intel_dp);
4104 Serge 3719
	} else if (IS_GEN7(dev) && port == PORT_A) {
6084 serge 3720
		signal_levels = gen7_edp_signal_levels(train_set);
3480 Serge 3721
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
4104 Serge 3722
	} else if (IS_GEN6(dev) && port == PORT_A) {
6084 serge 3723
		signal_levels = gen6_edp_signal_levels(train_set);
3480 Serge 3724
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3725
	} else {
6084 serge 3726
		signal_levels = gen4_signal_levels(train_set);
3480 Serge 3727
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3728
	}
3729
 
6084 serge 3730
	if (mask)
3731
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3480 Serge 3732
 
6084 serge 3733
	DRM_DEBUG_KMS("Using vswing level %d\n",
3734
		train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3735
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3736
		(train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3737
			DP_TRAIN_PRE_EMPHASIS_SHIFT);
3738
 
6937 serge 3739
	intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3480 Serge 3740
 
6937 serge 3741
	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2330 Serge 3742
	POSTING_READ(intel_dp->output_reg);
4560 Serge 3743
	}
2330 Serge 3744
 
6937 serge 3745
/*
 * Program the requested link-training pattern into the port's DP register.
 * Updates the cached intel_dp->DP value via the platform helper, then
 * writes it out and posts the write.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	/* Fold the training pattern bits into the cached DP register value. */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3758
 
6937 serge 3759
/*
 * Switch a DDI port's DP transport to idle-pattern transmission.
 * No-op on non-DDI platforms. On ports other than A, waits for the
 * hardware to confirm the idle pattern has been sent.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	/* Request idle pattern in DP_TP_CTL. */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3789
 
6084 serge 3790
/*
 * Bring the DP link down on a non-DDI port: switch to the idle training
 * pattern, disable the port, and apply the IBX transcoder-A workaround
 * where needed. The final register value is cached back in intel_dp->DP.
 * The exact write/posting-read ordering is a hardware requirement; do not
 * reorder.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI platforms tear the link down elsewhere. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	/* Nothing to do if the port is already disabled. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* First, drop into the idle training pattern (encoding differs per platform). */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Then actually disable the port and its audio output. */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	/* Give the panel time to power down before the caller proceeds. */
	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;
}
3858
 
3859
/*
 * Read and cache the sink's DPCD receiver capability block, and derive
 * driver state from it: PSR/PSR2 support, eDP 1.4 intermediate link
 * rates, and downstream-port info.
 *
 * Returns true if a DP sink is present (DPCD readable and non-zero),
 * false on AUX failure, absent DPCD, or a failed downstream-port fetch.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 (gen9+) additionally requires AUX frame sync. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* Table is zero-terminated; a 0 entry ends the list. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
2330 Serge 3948
 
3031 serge 3949
static void
3950
intel_dp_probe_oui(struct intel_dp *intel_dp)
3951
{
3952
	u8 buf[3];
3953
 
3954
	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3955
		return;
3956
 
5060 serge 3957
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3031 serge 3958
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3959
			      buf[0], buf[1], buf[2]);
3960
 
5060 serge 3961
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3031 serge 3962
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3963
			      buf[0], buf[1], buf[2]);
2330 Serge 3964
}
3965
 
2342 Serge 3966
static bool
5060 serge 3967
intel_dp_probe_mst(struct intel_dp *intel_dp)
3968
{
3969
	u8 buf[1];
3970
 
3971
	if (!intel_dp->can_mst)
3972
		return false;
3973
 
3974
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3975
		return false;
3976
 
3977
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3978
		if (buf[0] & DP_MST_CAP) {
3979
			DRM_DEBUG_KMS("Sink is MST capable\n");
3980
			intel_dp->is_mst = true;
3981
		} else {
3982
			DRM_DEBUG_KMS("Sink is not MST capable\n");
3983
			intel_dp->is_mst = false;
3984
		}
3985
	}
3986
 
3987
	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3988
	return intel_dp->is_mst;
3989
}
3990
 
6084 serge 3991
/*
 * Stop sink CRC calculation: clear DP_TEST_SINK_START and wait (up to 10
 * vblanks) for the sink's CRC count to drain to zero. Always re-enables
 * IPS on the way out, since intel_dp_sink_crc_start() disabled it.
 *
 * Returns 0 on success, -EIO on AUX failure, -ETIMEDOUT if the counter
 * never reaches zero.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Read-modify-write: clear only the START bit. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Poll once per vblank until the sink's CRC count drops to zero. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
		ret = -ETIMEDOUT;
	}

 out:
	hsw_enable_ips(intel_crtc);
	return ret;
}
4034
 
4035
/*
 * Start sink CRC calculation. Verifies the sink supports CRC, stops any
 * calculation already in progress, disables IPS (which would perturb the
 * CRC), sets DP_TEST_SINK_START, and waits one vblank for it to take
 * effect.
 *
 * Returns 0 on success, -ENOTTY if CRC is unsupported, -EIO on AUX
 * failure (with IPS restored).
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A calculation may already be running; stop it cleanly first. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return 0;
}
5354 serge 4069
 
6084 serge 4070
/*
 * Read a 6-byte frame CRC from the sink's DP_TEST_CRC_* registers.
 * Starts CRC calculation, waits up to 6 vblanks for the sink to report a
 * non-zero CRC count, reads the CRC into @crc, and always stops the
 * calculation again before returning.
 *
 * Returns 0 on success, -EIO on AUX failure, -ETIMEDOUT if the sink
 * never produces a CRC, or the error from intel_dp_sink_crc_start().
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink reports at least one CRC. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4110
 
4111
static bool
2342 Serge 4112
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4113
{
5060 serge 4114
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
6084 serge 4115
				       DP_DEVICE_SERVICE_IRQ_VECTOR,
5060 serge 4116
				       sink_irq_vector, 1) == 1;
4117
}
4118
 
4119
static bool
4120
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4121
{
2342 Serge 4122
	int ret;
4123
 
5060 serge 4124
	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4125
					     DP_SINK_COUNT_ESI,
4126
					     sink_irq_vector, 14);
4127
	if (ret != 14)
2342 Serge 4128
		return false;
4129
 
4130
	return true;
4131
}
4132
 
6084 serge 4133
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
2342 Serge 4134
{
6084 serge 4135
	uint8_t test_result = DP_TEST_ACK;
4136
	return test_result;
2342 Serge 4137
}
4138
 
6084 serge 4139
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4140
{
4141
	uint8_t test_result = DP_TEST_NAK;
4142
	return test_result;
4143
}
4144
 
4145
/*
 * Handle the DP compliance EDID-read autotest (DP CTS 1.2, 4.2.2.x).
 * If the cached EDID read failed, was corrupt, or saw too many I2C
 * defers, select the failsafe resolution; otherwise write back the
 * checksum of the last EDID block and select the standard resolution.
 * Returns the DP_TEST_* response byte to send to the sink.
 */
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_NAK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (!drm_dp_dpcd_write(&intel_dp->aux,
					DP_TEST_EDID_CHECKSUM,
					&block->checksum,
					1))
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance_test_active = 1;

	return test_result;
}
4190
 
4191
static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4192
{
4193
	uint8_t test_result = DP_TEST_NAK;
4194
	return test_result;
4195
}
4196
 
4197
/*
 * Dispatch a DP compliance automated-test request: read DP_TEST_REQUEST,
 * route it to the matching intel_dp_autotest_*() handler, and write the
 * handler's ACK/NAK back to DP_TEST_RESPONSE. Unknown requests are NAKed.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	uint8_t response = DP_TEST_NAK;
	uint8_t rxdata = 0;
	int status = 0;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (rxdata) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
		break;
	}

update_status:
	/* Always report a response, even when the request read failed. */
	status = drm_dp_dpcd_write(&intel_dp->aux,
				   DP_TEST_RESPONSE,
				   &response, 1);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4242
 
5060 serge 4243
/*
 * Service an MST sideband interrupt: read the ESI block, retrain the
 * link if channel EQ is lost, hand the ESI to the topology manager, and
 * acknowledge handled events (looping while new events keep arriving).
 * If the ESI read fails, MST is torn down and a hotplug event is sent.
 *
 * Returns the topology manager's result, 0 if nothing was handled, or
 * -EINVAL when MST is inactive or the sink stopped responding.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the write up to 3 times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile; loop. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4299
 
2330 Serge 4300
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * Called with the connection mutex held. Bails out silently if the
 * encoder has no active CRTC or the sink stops responding; retrains the
 * link when channel EQ is lost or a compliance test requests it.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* if link training is requested we should perform it always */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
		(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
4365
 
3031 serge 4366
/* XXX this is probably wrong for multiple downstream ports */
2330 Serge 4367
static enum drm_connector_status
4368
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4369
{
3031 serge 4370
	uint8_t *dpcd = intel_dp->dpcd;
4371
	uint8_t type;
4372
 
4373
	if (!intel_dp_get_dpcd(intel_dp))
4374
		return connector_status_disconnected;
4375
 
4376
	/* if there's no downstream port, we're done */
4377
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2330 Serge 4378
		return connector_status_connected;
3031 serge 4379
 
4380
	/* If we're HPD-aware, SINK_COUNT changes dynamically */
4560 Serge 4381
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4382
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
3031 serge 4383
		uint8_t reg;
5060 serge 4384
 
4385
		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4386
					    ®, 1) < 0)
3031 serge 4387
			return connector_status_unknown;
5060 serge 4388
 
3031 serge 4389
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4390
					      : connector_status_disconnected;
4391
	}
4392
 
4393
	/* If no HPD, poke DDC gently */
5060 serge 4394
	if (drm_probe_ddc(&intel_dp->aux.ddc))
3031 serge 4395
		return connector_status_connected;
4396
 
4397
	/* Well we tried, say unknown for unreliable port types */
4560 Serge 4398
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
6084 serge 4399
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4560 Serge 4400
		if (type == DP_DS_PORT_TYPE_VGA ||
4401
		    type == DP_DS_PORT_TYPE_NON_EDID)
6084 serge 4402
			return connector_status_unknown;
4560 Serge 4403
	} else {
4404
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4405
			DP_DWN_STRM_PORT_TYPE_MASK;
4406
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4407
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
5060 serge 4408
			return connector_status_unknown;
4560 Serge 4409
	}
3031 serge 4410
 
4411
	/* Anything else is out of spec, warn and ignore */
4412
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2330 Serge 4413
	return connector_status_disconnected;
4414
}
4415
 
4416
static enum drm_connector_status
5354 serge 4417
edp_detect(struct intel_dp *intel_dp)
2330 Serge 4418
{
3243 Serge 4419
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2330 Serge 4420
	enum drm_connector_status status;
4421
 
6084 serge 4422
	status = intel_panel_detect(dev);
4423
	if (status == connector_status_unknown)
4424
		status = connector_status_connected;
5354 serge 4425
 
6084 serge 4426
	return status;
5354 serge 4427
}
2330 Serge 4428
 
6084 serge 4429
static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4430
				       struct intel_digital_port *port)
5354 serge 4431
{
6084 serge 4432
	u32 bit;
5354 serge 4433
 
6084 serge 4434
	switch (port->port) {
4435
	case PORT_A:
4436
		return true;
4437
	case PORT_B:
4438
		bit = SDE_PORTB_HOTPLUG;
4439
		break;
4440
	case PORT_C:
4441
		bit = SDE_PORTC_HOTPLUG;
4442
		break;
4443
	case PORT_D:
4444
		bit = SDE_PORTD_HOTPLUG;
4445
		break;
4446
	default:
4447
		MISSING_CASE(port->port);
4448
		return false;
4449
	}
3480 Serge 4450
 
6084 serge 4451
	return I915_READ(SDEISR) & bit;
2330 Serge 4452
}
4453
 
6084 serge 4454
static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4455
				       struct intel_digital_port *port)
2330 Serge 4456
{
6084 serge 4457
	u32 bit;
2330 Serge 4458
 
6084 serge 4459
	switch (port->port) {
4460
	case PORT_A:
4461
		return true;
3480 Serge 4462
	case PORT_B:
6084 serge 4463
		bit = SDE_PORTB_HOTPLUG_CPT;
2330 Serge 4464
		break;
3480 Serge 4465
	case PORT_C:
6084 serge 4466
		bit = SDE_PORTC_HOTPLUG_CPT;
2330 Serge 4467
		break;
3480 Serge 4468
	case PORT_D:
6084 serge 4469
		bit = SDE_PORTD_HOTPLUG_CPT;
2330 Serge 4470
		break;
6084 serge 4471
	case PORT_E:
4472
		bit = SDE_PORTE_HOTPLUG_SPT;
4473
		break;
2330 Serge 4474
	default:
6084 serge 4475
		MISSING_CASE(port->port);
4476
		return false;
2330 Serge 4477
	}
6084 serge 4478
 
4479
	return I915_READ(SDEISR) & bit;
4480
}
4481
 
4482
static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4483
				       struct intel_digital_port *port)
4484
{
4485
	u32 bit;
4486
 
4487
	switch (port->port) {
4488
	case PORT_B:
4489
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4490
		break;
4491
	case PORT_C:
4492
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4493
		break;
4494
	case PORT_D:
4495
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4496
		break;
4497
	default:
4498
		MISSING_CASE(port->port);
4499
		return false;
4560 Serge 4500
	}
2330 Serge 4501
 
6084 serge 4502
	return I915_READ(PORT_HOTPLUG_STAT) & bit;
5097 serge 4503
}
4504
 
6660 serge 4505
/*
 * Live hotplug state for GM45, read from PORT_HOTPLUG_STAT. GM45 uses
 * different live-status bit positions than other G4x parts, hence the
 * separate _GM45 defines.
 */
static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *port)
{
	u32 bit;

	switch (port->port) {
	case PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
		break;
	case PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
		break;
	default:
		/* unexpected port: warn and report disconnected */
		MISSING_CASE(port->port);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
4527
 
4528
/*
 * Live hotplug state for Broxton, read from GEN8_DE_PORT_ISR. The port is
 * derived from the encoder's HPD pin rather than intel_dig_port->port.
 */
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
				       struct intel_digital_port *intel_dig_port)
{
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum port port;
	u32 bit;

	/* map the encoder's HPD pin back to a port */
	intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
	switch (port) {
	case PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		/* unexpected port: warn and report disconnected */
		MISSING_CASE(port);
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
4553
 
4554
/*
4555
 * intel_digital_port_connected - is the specified port connected?
4556
 * @dev_priv: i915 private structure
4557
 * @port: the port to test
4558
 *
4559
 * Return %true if @port is connected, %false otherwise.
4560
 */
6937 serge 4561
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
6084 serge 4562
					 struct intel_digital_port *port)
4563
{
4564
	if (HAS_PCH_IBX(dev_priv))
4565
		return ibx_digital_port_connected(dev_priv, port);
4566
	if (HAS_PCH_SPLIT(dev_priv))
4567
		return cpt_digital_port_connected(dev_priv, port);
4568
	else if (IS_BROXTON(dev_priv))
4569
		return bxt_digital_port_connected(dev_priv, port);
6660 serge 4570
	else if (IS_GM45(dev_priv))
4571
		return gm45_digital_port_connected(dev_priv, port);
6084 serge 4572
	else
4573
		return g4x_digital_port_connected(dev_priv, port);
4574
}
4575
 
2342 Serge 4576
static struct edid *
5354 serge 4577
intel_dp_get_edid(struct intel_dp *intel_dp)
2342 Serge 4578
{
5354 serge 4579
	struct intel_connector *intel_connector = intel_dp->attached_connector;
3243 Serge 4580
 
4581
	/* use cached edid if we have one */
4582
	if (intel_connector->edid) {
4583
		/* invalid edid */
4584
		if (IS_ERR(intel_connector->edid))
3031 serge 4585
			return NULL;
4586
 
4560 Serge 4587
		return drm_edid_duplicate(intel_connector->edid);
5354 serge 4588
	} else
4589
		return drm_get_edid(&intel_connector->base,
4590
				    &intel_dp->aux.ddc);
4591
}
3031 serge 4592
 
5354 serge 4593
static void
4594
intel_dp_set_edid(struct intel_dp *intel_dp)
4595
{
4596
	struct intel_connector *intel_connector = intel_dp->attached_connector;
4597
	struct edid *edid;
4598
 
4599
	edid = intel_dp_get_edid(intel_dp);
4600
	intel_connector->detect_edid = edid;
4601
 
4602
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4603
		intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4604
	else
4605
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
2342 Serge 4606
}
4607
 
5354 serge 4608
static void
4609
intel_dp_unset_edid(struct intel_dp *intel_dp)
2342 Serge 4610
{
5354 serge 4611
	struct intel_connector *intel_connector = intel_dp->attached_connector;
2342 Serge 4612
 
5354 serge 4613
	kfree(intel_connector->detect_edid);
4614
	intel_connector->detect_edid = NULL;
3243 Serge 4615
 
5354 serge 4616
	intel_dp->has_audio = false;
4617
}
3031 serge 4618
 
2330 Serge 4619
/*
 * drm_connector_funcs.detect for DP/eDP connectors: determine connection
 * status, refresh the cached EDID, probe OUI/MST, and service any pending
 * sink IRQs. Holds an AUX power domain reference for the duration.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* invalidate any EDID cached by the previous detect cycle */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* AUX transfers below need the AUX power domain powered up */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_connected) {
		/* sink gone: reset compliance-test bookkeeping */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		goto out;
	}

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4706
 
5354 serge 4707
/*
 * drm_connector_funcs.force: refresh the cached EDID for a connector whose
 * status is being forced by the user. Only re-reads the EDID when the
 * connector is currently marked connected; AUX power is held for the read.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain power_domain;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, power_domain);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
4732
 
4733
static int intel_dp_get_modes(struct drm_connector *connector)
4734
{
4735
	struct intel_connector *intel_connector = to_intel_connector(connector);
4736
	struct edid *edid;
4737
 
4738
	edid = intel_connector->detect_edid;
4739
	if (edid) {
4740
		int ret = intel_connector_update_modes(connector, edid);
6084 serge 4741
		if (ret)
4742
			return ret;
5354 serge 4743
	}
2330 Serge 4744
 
3243 Serge 4745
	/* if eDP has no EDID, fall back to fixed mode */
5354 serge 4746
	if (is_edp(intel_attached_dp(connector)) &&
4747
	    intel_connector->panel.fixed_mode) {
6084 serge 4748
		struct drm_display_mode *mode;
5354 serge 4749
 
4750
		mode = drm_mode_duplicate(connector->dev,
3243 Serge 4751
					  intel_connector->panel.fixed_mode);
4752
		if (mode) {
2330 Serge 4753
			drm_mode_probed_add(connector, mode);
4754
			return 1;
4755
		}
4756
	}
5354 serge 4757
 
2330 Serge 4758
	return 0;
4759
}
4760
 
3243 Serge 4761
static bool
4762
intel_dp_detect_audio(struct drm_connector *connector)
4763
{
5354 serge 4764
	bool has_audio = false;
3243 Serge 4765
	struct edid *edid;
2330 Serge 4766
 
5354 serge 4767
	edid = to_intel_connector(connector)->detect_edid;
4768
	if (edid)
3243 Serge 4769
		has_audio = drm_detect_monitor_audio(edid);
2330 Serge 4770
 
3243 Serge 4771
	return has_audio;
4772
}
2330 Serge 4773
 
4774
/*
 * drm_connector_funcs.set_property: handle the force-audio, broadcast-RGB
 * and (eDP-only) scaling-mode properties. Returns 0 on success (including
 * no-op updates), -EINVAL for unknown properties or invalid values. When a
 * property actually changes and a CRTC is bound, the mode is restored so
 * the new setting takes effect.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-probes the EDID; ON/OFF force the value */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* skip the modeset if nothing effectively changed */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* re-run the modeset so the changed property takes effect */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4861
 
4862
/*
 * drm_connector_funcs.destroy: free the detect-cycle EDID, the cached
 * override EDID (unless it is a NULL/error marker), the eDP panel state,
 * and finally the connector itself.
 */
static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	kfree(intel_connector->detect_edid);

	/* the cached override EDID may be an ERR_PTR marker, not kfree-able */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}
4880
 
3243 Serge 4881
/*
 * drm_encoder_funcs.destroy: tear down AUX and MST state, make sure eDP
 * VDD is really off, then free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_aux_fini(intel_dp);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		/* disabled in this port: no delayed-work infrastructure */
//		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4905
 
6660 serge 4906
/*
 * Suspend hook: for eDP, force any lingering VDD (left on by the delayed
 * vdd-off path) off before the device suspends. No-op for non-eDP.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
//	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4922
 
5354 serge 4923
/*
 * Sanitize eDP VDD tracking after boot/resume: if the BIOS left VDD
 * enabled, take the matching power-domain reference and schedule a
 * delayed vdd-off so our software state matches the hardware.
 * Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4947
 
6660 serge 4948
/*
 * drm_encoder_funcs.reset: re-sync eDP power-sequencer state with whatever
 * the BIOS left behind (VLV/CHV PPS pipe assignment, lingering VDD).
 * No-op for non-eDP encoders.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4970
 
2330 Serge 4971
/* Connector ops for DP/eDP: atomic-helper DPMS/state plus our detect,
 * force, property and destroy implementations above. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4982
 
4983
/* Probe helpers: mode enumeration, validation and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4988
 
4989
/* Encoder ops: reset re-syncs PPS state after boot/resume. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4993
 
6084 serge 4994
/*
 * Hotplug IRQ handler for a DP digital port. Long pulses re-probe the sink
 * (DPCD, OUI, MST); short pulses service MST messages or re-check link
 * status. Returns IRQ_HANDLED when the pulse was consumed, IRQ_NONE on the
 * failure paths. Holds an AUX power-domain reference while talking to the
 * sink.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		/* non-MST sink: verify the link under the connection lock */
		if (!intel_dp_probe_mst(intel_dp)) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5074
 
6084 serge 5075
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	union child_device_config *p_child;
	int i;
	/* maps DP ports to the VBT DVO port codes used in child devices */
	static const short port_mapping[] = {
		[PORT_B] = DVO_PORT_DPB,
		[PORT_C] = DVO_PORT_DPC,
		[PORT_D] = DVO_PORT_DPD,
		[PORT_E] = DVO_PORT_DPE,
	};

	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		return false;

	/* port A is always eDP on gen5+ */
	if (port == PORT_A)
		return true;

	if (!dev_priv->vbt.child_dev_num)
		return false;

	/* scan the VBT child devices for an eDP entry on this port */
	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		p_child = dev_priv->vbt.child_dev + i;

		if (p_child->common.dvo_port == port_mapping[port] &&
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
			return true;
	}
	return false;
}
5111
 
5060 serge 5112
void
2330 Serge 5113
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5114
{
3243 Serge 5115
	struct intel_connector *intel_connector = to_intel_connector(connector);
5116
 
2330 Serge 5117
	intel_attach_force_audio_property(connector);
5118
	intel_attach_broadcast_rgb_property(connector);
3480 Serge 5119
	intel_dp->color_range_auto = true;
3243 Serge 5120
 
5121
	if (is_edp(intel_dp)) {
5122
		drm_mode_create_scaling_mode_property(connector->dev);
5123
		drm_object_attach_property(
5124
			&connector->base,
5125
			connector->dev->mode_config.scaling_mode_property,
5126
			DRM_MODE_SCALE_ASPECT);
5127
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5128
	}
2330 Serge 5129
}
5130
 
5060 serge 5131
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5132
{
5133
	intel_dp->last_power_cycle = jiffies;
5134
	intel_dp->last_power_on = jiffies;
5135
	intel_dp->last_backlight_off = jiffies;
5136
}
5137
 
3243 Serge 5138
/*
 * Compute the eDP panel power-sequencing delays (T1-T12) by combining the
 * values currently programmed in the PPS registers, the VBT, and the eDP
 * spec upper limits, and store them in intel_dp->pps_delays and the
 * per-stage delay fields. Runs only once (skipped when t11_t12 is already
 * set). Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* select the per-platform PPS register set */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* BXT keeps the power-cycle delay in the control register instead */
	if (IS_BROXTON(dev)) {
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* convert from hw units (100us) to ms, rounding up */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5260
 
5261
/*
 * Program the panel power-sequencer registers from the delays computed by
 * intel_dp_init_panel_power_sequencer(), including the pp clock divisor
 * and (pre-HSW) the port-select bits. Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* select the per-platform PPS register set */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT: power-cycle delay lives in the control register */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5348
 
6084 serge 5349
/**
5350
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5351
 * @dev: DRM device
5352
 * @refresh_rate: RR to be programmed
5353
 *
5354
 * This function gets called when refresh rate (RR) has to be changed from
5355
 * one frequency to another. Switches can be between high and low RR
5356
 * supported by the panel or to any other RR based on media playback (in
5357
 * this case, RR value needs to be passed from user space).
5358
 *
5359
 * The caller of this function needs to take a lock on dev_priv->drrs.
5360
 */
5361
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5060 serge 5362
{
5363
	struct drm_i915_private *dev_priv = dev->dev_private;
5364
	struct intel_encoder *encoder;
6084 serge 5365
	struct intel_digital_port *dig_port = NULL;
5366
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
5367
	struct intel_crtc_state *config = NULL;
5060 serge 5368
	struct intel_crtc *intel_crtc = NULL;
6084 serge 5369
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5060 serge 5370
 
5371
	if (refresh_rate <= 0) {
5372
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5373
		return;
5374
	}
5375
 
6084 serge 5376
	if (intel_dp == NULL) {
5377
		DRM_DEBUG_KMS("DRRS not supported.\n");
5060 serge 5378
		return;
5379
	}
5380
 
5381
	/*
6084 serge 5382
	 * FIXME: This needs proper synchronization with psr state for some
5383
	 * platforms that cannot have PSR and DRRS enabled at the same time.
5060 serge 5384
	 */
5385
 
6084 serge 5386
	dig_port = dp_to_dig_port(intel_dp);
5387
	encoder = &dig_port->base;
5388
	intel_crtc = to_intel_crtc(encoder->base.crtc);
5060 serge 5389
 
5390
	if (!intel_crtc) {
5391
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5392
		return;
5393
	}
5394
 
6084 serge 5395
	config = intel_crtc->config;
5060 serge 5396
 
6084 serge 5397
	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5060 serge 5398
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5399
		return;
5400
	}
5401
 
6084 serge 5402
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5403
			refresh_rate)
5060 serge 5404
		index = DRRS_LOW_RR;
5405
 
6084 serge 5406
	if (index == dev_priv->drrs.refresh_rate_type) {
5060 serge 5407
		DRM_DEBUG_KMS(
5408
			"DRRS requested for previously set RR...ignoring\n");
5409
		return;
5410
	}
5411
 
5412
	if (!intel_crtc->active) {
5413
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5414
		return;
5415
	}
5416
 
6084 serge 5417
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5418
		switch (index) {
5419
		case DRRS_HIGH_RR:
5420
			intel_dp_set_m_n(intel_crtc, M1_N1);
5421
			break;
5422
		case DRRS_LOW_RR:
5423
			intel_dp_set_m_n(intel_crtc, M2_N2);
5424
			break;
5425
		case DRRS_MAX_RR:
5426
		default:
5427
			DRM_ERROR("Unsupported refreshrate type\n");
5428
		}
5429
	} else if (INTEL_INFO(dev)->gen > 6) {
6937 serge 5430
		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
6084 serge 5431
		u32 val;
5432
 
5060 serge 5433
		val = I915_READ(reg);
5434
		if (index > DRRS_HIGH_RR) {
6937 serge 5435
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6084 serge 5436
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5437
			else
5438
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
5060 serge 5439
		} else {
6937 serge 5440
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6084 serge 5441
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5442
			else
5443
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5060 serge 5444
		}
5445
		I915_WRITE(reg, val);
5446
	}
5447
 
6084 serge 5448
	dev_priv->drrs.refresh_rate_type = index;
5449
 
5450
	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5451
}
5452
 
5453
/**
5454
 * intel_edp_drrs_enable - init drrs struct if supported
5455
 * @intel_dp: DP struct
5456
 *
5457
 * Initializes frontbuffer_bits and drrs.dp
5458
 */
5459
void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5460
{
5461
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5462
	struct drm_i915_private *dev_priv = dev->dev_private;
5463
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5464
	struct drm_crtc *crtc = dig_port->base.base.crtc;
5465
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5466
 
5467
	if (!intel_crtc->config->has_drrs) {
5468
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5469
		return;
5470
	}
5471
 
5472
	mutex_lock(&dev_priv->drrs.mutex);
5473
	if (WARN_ON(dev_priv->drrs.dp)) {
5474
		DRM_ERROR("DRRS already enabled\n");
5475
		goto unlock;
5476
	}
5477
 
5478
	dev_priv->drrs.busy_frontbuffer_bits = 0;
5479
 
5480
	dev_priv->drrs.dp = intel_dp;
5481
 
5482
unlock:
5483
	mutex_unlock(&dev_priv->drrs.mutex);
5484
}
5485
 
5486
/**
5487
 * intel_edp_drrs_disable - Disable DRRS
5488
 * @intel_dp: DP struct
5489
 *
5490
 */
5491
void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5492
{
5493
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
5494
	struct drm_i915_private *dev_priv = dev->dev_private;
5495
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5496
	struct drm_crtc *crtc = dig_port->base.base.crtc;
5497
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5498
 
5499
	if (!intel_crtc->config->has_drrs)
5500
		return;
5501
 
5502
	mutex_lock(&dev_priv->drrs.mutex);
5503
	if (!dev_priv->drrs.dp) {
5504
		mutex_unlock(&dev_priv->drrs.mutex);
5505
		return;
5506
	}
5507
 
5508
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5509
		intel_dp_set_drrs_state(dev_priv->dev,
5510
			intel_dp->attached_connector->panel.
5511
			fixed_mode->vrefresh);
5512
 
5513
	dev_priv->drrs.dp = NULL;
5514
	mutex_unlock(&dev_priv->drrs.mutex);
5515
 
6937 serge 5516
//	cancel_delayed_work_sync(&dev_priv->drrs.work);
6084 serge 5517
}
5518
 
5519
static void intel_edp_drrs_downclock_work(struct work_struct *work)
5520
{
5521
	struct drm_i915_private *dev_priv =
5522
		container_of(work, typeof(*dev_priv), drrs.work.work);
5523
	struct intel_dp *intel_dp;
5524
 
5525
	mutex_lock(&dev_priv->drrs.mutex);
5526
 
5527
	intel_dp = dev_priv->drrs.dp;
5528
 
5529
	if (!intel_dp)
5530
		goto unlock;
5531
 
5060 serge 5532
	/*
6084 serge 5533
	 * The delayed work can race with an invalidate hence we need to
5534
	 * recheck.
5060 serge 5535
	 */
5536
 
6084 serge 5537
	if (dev_priv->drrs.busy_frontbuffer_bits)
5538
		goto unlock;
5060 serge 5539
 
6084 serge 5540
	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5541
		intel_dp_set_drrs_state(dev_priv->dev,
5542
			intel_dp->attached_connector->panel.
5543
			downclock_mode->vrefresh);
5060 serge 5544
 
6084 serge 5545
unlock:
5546
	mutex_unlock(&dev_priv->drrs.mutex);
5547
}
5060 serge 5548
 
6084 serge 5549
/**
5550
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5551
 * @dev: DRM device
5552
 * @frontbuffer_bits: frontbuffer plane tracking bits
5553
 *
5554
 * This function gets called everytime rendering on the given planes start.
5555
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5556
 *
5557
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5558
 */
5559
void intel_edp_drrs_invalidate(struct drm_device *dev,
5560
		unsigned frontbuffer_bits)
5561
{
5562
	struct drm_i915_private *dev_priv = dev->dev_private;
5563
	struct drm_crtc *crtc;
5564
	enum pipe pipe;
5565
 
5566
	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5567
		return;
5568
 
6937 serge 5569
//	cancel_delayed_work(&dev_priv->drrs.work);
6084 serge 5570
 
5571
	mutex_lock(&dev_priv->drrs.mutex);
5572
	if (!dev_priv->drrs.dp) {
5573
		mutex_unlock(&dev_priv->drrs.mutex);
5574
		return;
5575
	}
5576
 
5577
	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5578
	pipe = to_intel_crtc(crtc)->pipe;
5579
 
5580
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5581
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5582
 
5583
	/* invalidate means busy screen hence upclock */
5584
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5585
		intel_dp_set_drrs_state(dev_priv->dev,
5586
				dev_priv->drrs.dp->attached_connector->panel.
5587
				fixed_mode->vrefresh);
5588
 
5589
	mutex_unlock(&dev_priv->drrs.mutex);
5060 serge 5590
}
5591
 
6084 serge 5592
/**
5593
 * intel_edp_drrs_flush - Restart Idleness DRRS
5594
 * @dev: DRM device
5595
 * @frontbuffer_bits: frontbuffer plane tracking bits
5596
 *
5597
 * This function gets called every time rendering on the given planes has
5598
 * completed or flip on a crtc is completed. So DRRS should be upclocked
5599
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5600
 * if no other planes are dirty.
5601
 *
5602
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5603
 */
5604
void intel_edp_drrs_flush(struct drm_device *dev,
5605
		unsigned frontbuffer_bits)
5606
{
5607
	struct drm_i915_private *dev_priv = dev->dev_private;
5608
	struct drm_crtc *crtc;
5609
	enum pipe pipe;
5610
 
5611
	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5612
		return;
5613
 
6937 serge 5614
//	cancel_delayed_work(&dev_priv->drrs.work);
6084 serge 5615
 
5616
	mutex_lock(&dev_priv->drrs.mutex);
5617
	if (!dev_priv->drrs.dp) {
5618
		mutex_unlock(&dev_priv->drrs.mutex);
5619
		return;
5620
	}
5621
 
5622
	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5623
	pipe = to_intel_crtc(crtc)->pipe;
5624
 
5625
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5626
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5627
 
5628
	/* flush means busy screen hence upclock */
5629
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5630
		intel_dp_set_drrs_state(dev_priv->dev,
5631
				dev_priv->drrs.dp->attached_connector->panel.
5632
				fixed_mode->vrefresh);
5633
 
6320 serge 5634
	/*
5635
	 * flush also means no more activity hence schedule downclock, if all
5636
	 * other fbs are quiescent too
5637
	 */
5638
	if (!dev_priv->drrs.busy_frontbuffer_bits)
5639
		schedule_delayed_work(&dev_priv->drrs.work,
5640
				msecs_to_jiffies(1000));
6084 serge 5641
	mutex_unlock(&dev_priv->drrs.mutex);
5642
}
5643
 
5644
/**
5645
 * DOC: Display Refresh Rate Switching (DRRS)
5646
 *
5647
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5648
 * which enables swtching between low and high refresh rates,
5649
 * dynamically, based on the usage scenario. This feature is applicable
5650
 * for internal panels.
5651
 *
5652
 * Indication that the panel supports DRRS is given by the panel EDID, which
5653
 * would list multiple refresh rates for one resolution.
5654
 *
5655
 * DRRS is of 2 types - static and seamless.
5656
 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5657
 * (may appear as a blink on screen) and is used in dock-undock scenario.
5658
 * Seamless DRRS involves changing RR without any visual effect to the user
5659
 * and can be used during normal system usage. This is done by programming
5660
 * certain registers.
5661
 *
5662
 * Support for static/seamless DRRS may be indicated in the VBT based on
5663
 * inputs from the panel spec.
5664
 *
5665
 * DRRS saves power by switching to low RR based on usage scenarios.
5666
 *
5667
 * eDP DRRS:-
5668
 *        The implementation is based on frontbuffer tracking implementation.
5669
 * When there is a disturbance on the screen triggered by user activity or a
5670
 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5671
 * When there is no movement on screen, after a timeout of 1 second, a switch
5672
 * to low RR is made.
5673
 *        For integration with frontbuffer tracking code,
5674
 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5675
 *
5676
 * DRRS can be further extended to support other internal panels and also
5677
 * the scenario of video playback wherein RR is set based on the rate
5678
 * requested by userspace.
5679
 */
5680
 
5681
/**
5682
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5683
 * @intel_connector: eDP connector
5684
 * @fixed_mode: preferred mode of panel
5685
 *
5686
 * This function is  called only once at driver load to initialize basic
5687
 * DRRS stuff.
5688
 *
5689
 * Returns:
5690
 * Downclock mode if panel supports it, else return NULL.
5691
 * DRRS support is determined by the presence of downclock mode (apart
5692
 * from VBT setting).
5693
 */
5060 serge 5694
static struct drm_display_mode *
6084 serge 5695
intel_dp_drrs_init(struct intel_connector *intel_connector,
5696
		struct drm_display_mode *fixed_mode)
5060 serge 5697
{
5698
	struct drm_connector *connector = &intel_connector->base;
6084 serge 5699
	struct drm_device *dev = connector->dev;
5060 serge 5700
	struct drm_i915_private *dev_priv = dev->dev_private;
5701
	struct drm_display_mode *downclock_mode = NULL;
5702
 
6084 serge 5703
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5704
	mutex_init(&dev_priv->drrs.mutex);
5705
 
5060 serge 5706
	if (INTEL_INFO(dev)->gen <= 6) {
5707
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5708
		return NULL;
5709
	}
5710
 
5711
	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5354 serge 5712
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5060 serge 5713
		return NULL;
5714
	}
5715
 
5716
	downclock_mode = intel_find_panel_downclock
5717
					(dev, fixed_mode, connector);
5718
 
5719
	if (!downclock_mode) {
6084 serge 5720
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5060 serge 5721
		return NULL;
5722
	}
5723
 
6084 serge 5724
	dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5060 serge 5725
 
6084 serge 5726
	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5354 serge 5727
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5060 serge 5728
	return downclock_mode;
5729
}
5730
 
4104 Serge 5731
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5354 serge 5732
				     struct intel_connector *intel_connector)
4104 Serge 5733
{
5734
	struct drm_connector *connector = &intel_connector->base;
5735
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5060 serge 5736
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5737
	struct drm_device *dev = intel_encoder->base.dev;
4104 Serge 5738
	struct drm_i915_private *dev_priv = dev->dev_private;
5739
	struct drm_display_mode *fixed_mode = NULL;
5060 serge 5740
	struct drm_display_mode *downclock_mode = NULL;
4104 Serge 5741
	bool has_dpcd;
5742
	struct drm_display_mode *scan;
5743
	struct edid *edid;
5354 serge 5744
	enum pipe pipe = INVALID_PIPE;
4104 Serge 5745
 
5746
	if (!is_edp(intel_dp))
5747
		return true;
5748
 
5354 serge 5749
	pps_lock(intel_dp);
5750
	intel_edp_panel_vdd_sanitize(intel_dp);
5751
	pps_unlock(intel_dp);
4104 Serge 5752
 
5753
	/* Cache DPCD and EDID for edp. */
5754
	has_dpcd = intel_dp_get_dpcd(intel_dp);
5755
 
5756
	if (has_dpcd) {
5757
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5758
			dev_priv->no_aux_handshake =
5759
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5760
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5761
	} else {
5762
		/* if this fails, presume the device is a ghost */
5763
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
5764
		return false;
5765
	}
5766
 
5767
	/* We now know it's not a ghost, init power sequence regs. */
5354 serge 5768
	pps_lock(intel_dp);
5769
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5770
	pps_unlock(intel_dp);
4104 Serge 5771
 
5060 serge 5772
	mutex_lock(&dev->mode_config.mutex);
5773
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
4104 Serge 5774
	if (edid) {
5775
		if (drm_add_edid_modes(connector, edid)) {
5776
			drm_mode_connector_update_edid_property(connector,
5777
								edid);
5778
			drm_edid_to_eld(connector, edid);
5779
		} else {
5780
			kfree(edid);
5781
			edid = ERR_PTR(-EINVAL);
5782
		}
5783
	} else {
5784
		edid = ERR_PTR(-ENOENT);
5785
	}
5786
	intel_connector->edid = edid;
5787
 
5788
	/* prefer fixed mode from EDID if available */
5789
	list_for_each_entry(scan, &connector->probed_modes, head) {
5790
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5791
			fixed_mode = drm_mode_duplicate(dev, scan);
5060 serge 5792
			downclock_mode = intel_dp_drrs_init(
5793
						intel_connector, fixed_mode);
4104 Serge 5794
			break;
5795
		}
5796
	}
5797
 
5798
	/* fallback to VBT if available for eDP */
5799
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5800
		fixed_mode = drm_mode_duplicate(dev,
5801
					dev_priv->vbt.lfp_lvds_vbt_mode);
5802
		if (fixed_mode)
5803
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5804
	}
5060 serge 5805
	mutex_unlock(&dev->mode_config.mutex);
4104 Serge 5806
 
6937 serge 5807
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5808
//		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5809
//		register_reboot_notifier(&intel_dp->edp_notifier);
5354 serge 5810
 
5811
		/*
5812
		 * Figure out the current pipe for the initial backlight setup.
5813
		 * If the current pipe isn't valid, try the PPS pipe, and if that
5814
		 * fails just assume pipe A.
5815
		 */
5816
		if (IS_CHERRYVIEW(dev))
5817
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5818
		else
5819
			pipe = PORT_TO_PIPE(intel_dp->DP);
5820
 
5821
		if (pipe != PIPE_A && pipe != PIPE_B)
5822
			pipe = intel_dp->pps_pipe;
5823
 
5824
		if (pipe != PIPE_A && pipe != PIPE_B)
5825
			pipe = PIPE_A;
5826
 
5827
		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5828
			      pipe_name(pipe));
5829
	}
5830
 
5060 serge 5831
	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
6084 serge 5832
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
5354 serge 5833
	intel_panel_setup_backlight(connector, pipe);
4104 Serge 5834
 
5835
	return true;
5836
}
5837
 
5838
bool
3243 Serge 5839
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5840
			struct intel_connector *intel_connector)
2330 Serge 5841
{
3243 Serge 5842
	struct drm_connector *connector = &intel_connector->base;
5843
	struct intel_dp *intel_dp = &intel_dig_port->dp;
5844
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
5845
	struct drm_device *dev = intel_encoder->base.dev;
2330 Serge 5846
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 5847
	enum port port = intel_dig_port->port;
6937 serge 5848
	int type, ret;
2330 Serge 5849
 
5354 serge 5850
	intel_dp->pps_pipe = INVALID_PIPE;
5851
 
5060 serge 5852
	/* intel_dp vfuncs */
5354 serge 5853
	if (INTEL_INFO(dev)->gen >= 9)
5854
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
6937 serge 5855
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5060 serge 5856
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5857
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5858
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5859
	else if (HAS_PCH_SPLIT(dev))
5860
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5861
	else
5862
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5863
 
5354 serge 5864
	if (INTEL_INFO(dev)->gen >= 9)
5865
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5866
	else
6084 serge 5867
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5060 serge 5868
 
6937 serge 5869
	if (HAS_DDI(dev))
5870
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5871
 
3031 serge 5872
	/* Preserve the current hw state. */
5873
	intel_dp->DP = I915_READ(intel_dp->output_reg);
3243 Serge 5874
	intel_dp->attached_connector = intel_connector;
2330 Serge 5875
 
4560 Serge 5876
	if (intel_dp_is_edp(dev, port))
5877
		type = DRM_MODE_CONNECTOR_eDP;
5878
	else
6084 serge 5879
		type = DRM_MODE_CONNECTOR_DisplayPort;
2330 Serge 5880
 
4104 Serge 5881
	/*
5882
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5883
	 * for DP the encoder type can be set by the caller to
5884
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5885
	 */
5886
	if (type == DRM_MODE_CONNECTOR_eDP)
5887
		intel_encoder->type = INTEL_OUTPUT_EDP;
5888
 
5354 serge 5889
	/* eDP only on port B and/or C on vlv/chv */
6937 serge 5890
	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5891
		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
5354 serge 5892
		return false;
5893
 
4104 Serge 5894
	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5895
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5896
			port_name(port));
5897
 
2330 Serge 5898
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5899
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5900
 
5901
	connector->interlace_allowed = true;
5902
	connector->doublescan_allowed = 0;
5903
 
3243 Serge 5904
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5060 serge 5905
			  edp_panel_vdd_work);
2330 Serge 5906
 
5907
	intel_connector_attach_encoder(intel_connector, intel_encoder);
5060 serge 5908
	drm_connector_register(connector);
2330 Serge 5909
 
3480 Serge 5910
	if (HAS_DDI(dev))
3243 Serge 5911
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5912
	else
6084 serge 5913
		intel_connector->get_hw_state = intel_connector_get_hw_state;
5060 serge 5914
	intel_connector->unregister = intel_dp_connector_unregister;
3031 serge 5915
 
5060 serge 5916
	/* Set up the hotplug pin. */
3031 serge 5917
	switch (port) {
5918
	case PORT_A:
3746 Serge 5919
		intel_encoder->hpd_pin = HPD_PORT_A;
6084 serge 5920
		break;
3031 serge 5921
	case PORT_B:
3746 Serge 5922
		intel_encoder->hpd_pin = HPD_PORT_B;
6937 serge 5923
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
6084 serge 5924
			intel_encoder->hpd_pin = HPD_PORT_A;
5925
		break;
3031 serge 5926
	case PORT_C:
3746 Serge 5927
		intel_encoder->hpd_pin = HPD_PORT_C;
6084 serge 5928
		break;
3031 serge 5929
	case PORT_D:
3746 Serge 5930
		intel_encoder->hpd_pin = HPD_PORT_D;
6084 serge 5931
		break;
5932
	case PORT_E:
5933
		intel_encoder->hpd_pin = HPD_PORT_E;
5934
		break;
3031 serge 5935
	default:
3746 Serge 5936
		BUG();
2330 Serge 5937
	}
5938
 
5060 serge 5939
	if (is_edp(intel_dp)) {
5354 serge 5940
		pps_lock(intel_dp);
5060 serge 5941
		intel_dp_init_panel_power_timestamps(intel_dp);
6937 serge 5942
		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5354 serge 5943
			vlv_initial_power_sequencer_setup(intel_dp);
5944
		else
5945
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
5946
		pps_unlock(intel_dp);
5060 serge 5947
	}
2330 Serge 5948
 
6937 serge 5949
	ret = intel_dp_aux_init(intel_dp, intel_connector);
5950
	if (ret)
5951
		goto fail;
3031 serge 5952
 
5060 serge 5953
	/* init MST on ports that can support it */
6084 serge 5954
	if (HAS_DP_MST(dev) &&
5955
	    (port == PORT_B || port == PORT_C || port == PORT_D))
5956
		intel_dp_mst_encoder_init(intel_dig_port,
5957
					  intel_connector->base.base.id);
5060 serge 5958
 
5354 serge 5959
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6937 serge 5960
		intel_dp_aux_fini(intel_dp);
5961
		intel_dp_mst_encoder_cleanup(intel_dig_port);
5962
		goto fail;
2330 Serge 5963
	}
5964
 
5965
	intel_dp_add_properties(intel_dp, connector);
5966
 
5967
	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5968
	 * 0xd.  Failure to do so will result in spurious interrupts being
5969
	 * generated on the port when a cable is not attached.
5970
	 */
5971
	if (IS_G4X(dev) && !IS_GM45(dev)) {
5972
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5973
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5974
	}
4104 Serge 5975
 
6296 serge 5976
	i915_debugfs_connector_add(connector);
5977
 
4104 Serge 5978
	return true;
6937 serge 5979
 
5980
fail:
5981
	if (is_edp(intel_dp)) {
5982
//		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5983
		/*
5984
		 * vdd might still be enabled do to the delayed vdd off.
5985
		 * Make sure vdd is actually turned off here.
5986
		 */
5987
		pps_lock(intel_dp);
5988
		edp_panel_vdd_off_sync(intel_dp);
5989
		pps_unlock(intel_dp);
2330 Serge 5990
}
6937 serge 5991
	drm_connector_unregister(connector);
5992
	drm_connector_cleanup(connector);
3243 Serge 5993
 
6937 serge 5994
	return false;
5995
}
5996
 
5997
void
5998
intel_dp_init(struct drm_device *dev,
5999
	      i915_reg_t output_reg, enum port port)
3243 Serge 6000
{
5060 serge 6001
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 6002
	struct intel_digital_port *intel_dig_port;
6003
	struct intel_encoder *intel_encoder;
6004
	struct drm_encoder *encoder;
6005
	struct intel_connector *intel_connector;
6006
 
4560 Serge 6007
	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
3243 Serge 6008
	if (!intel_dig_port)
6937 serge 6009
		return;
3243 Serge 6010
 
6084 serge 6011
	intel_connector = intel_connector_alloc();
6012
	if (!intel_connector)
6013
		goto err_connector_alloc;
3243 Serge 6014
 
6015
	intel_encoder = &intel_dig_port->base;
6016
	encoder = &intel_encoder->base;
6017
 
6937 serge 6018
	if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6019
			     DRM_MODE_ENCODER_TMDS, NULL))
6020
		goto err_encoder_init;
3243 Serge 6021
 
3746 Serge 6022
	intel_encoder->compute_config = intel_dp_compute_config;
3243 Serge 6023
	intel_encoder->disable = intel_disable_dp;
6024
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
4104 Serge 6025
	intel_encoder->get_config = intel_dp_get_config;
5060 serge 6026
	intel_encoder->suspend = intel_dp_encoder_suspend;
6027
	if (IS_CHERRYVIEW(dev)) {
6028
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6029
		intel_encoder->pre_enable = chv_pre_enable_dp;
6030
		intel_encoder->enable = vlv_enable_dp;
6031
		intel_encoder->post_disable = chv_post_disable_dp;
6084 serge 6032
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
5060 serge 6033
	} else if (IS_VALLEYVIEW(dev)) {
4560 Serge 6034
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
4104 Serge 6035
		intel_encoder->pre_enable = vlv_pre_enable_dp;
6036
		intel_encoder->enable = vlv_enable_dp;
5060 serge 6037
		intel_encoder->post_disable = vlv_post_disable_dp;
4104 Serge 6038
	} else {
4560 Serge 6039
		intel_encoder->pre_enable = g4x_pre_enable_dp;
6040
		intel_encoder->enable = g4x_enable_dp;
5354 serge 6041
		if (INTEL_INFO(dev)->gen >= 5)
6042
			intel_encoder->post_disable = ilk_post_disable_dp;
4104 Serge 6043
	}
3243 Serge 6044
 
6045
	intel_dig_port->port = port;
6046
	intel_dig_port->dp.output_reg = output_reg;
6047
 
6048
	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5060 serge 6049
	if (IS_CHERRYVIEW(dev)) {
6050
		if (port == PORT_D)
6051
			intel_encoder->crtc_mask = 1 << 2;
6052
		else
6053
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6054
	} else {
6084 serge 6055
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5060 serge 6056
	}
6057
	intel_encoder->cloneable = 0;
3243 Serge 6058
 
5060 serge 6059
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6084 serge 6060
	dev_priv->hotplug.irq_port[port] = intel_dig_port;
5060 serge 6061
 
6084 serge 6062
	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6063
		goto err_init_connector;
6064
 
6937 serge 6065
	return;
6084 serge 6066
 
6067
err_init_connector:
6068
	drm_encoder_cleanup(encoder);
6937 serge 6069
err_encoder_init:
6084 serge 6070
	kfree(intel_connector);
6071
err_connector_alloc:
6072
	kfree(intel_dig_port);
6937 serge 6073
 
6074
	return;
3243 Serge 6075
}
5060 serge 6076
 
6077
void intel_dp_mst_suspend(struct drm_device *dev)
6078
{
6079
	struct drm_i915_private *dev_priv = dev->dev_private;
6080
	int i;
6081
 
6082
	/* disable MST */
6083
	for (i = 0; i < I915_MAX_PORTS; i++) {
6084 serge 6084
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5060 serge 6085
		if (!intel_dig_port)
6086
			continue;
6087
 
6088
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6089
			if (!intel_dig_port->dp.can_mst)
6090
				continue;
6091
			if (intel_dig_port->dp.is_mst)
6092
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6093
		}
6094
	}
6095
}
6096
 
6097
void intel_dp_mst_resume(struct drm_device *dev)
6098
{
6099
	struct drm_i915_private *dev_priv = dev->dev_private;
6100
	int i;
6101
 
6102
	for (i = 0; i < I915_MAX_PORTS; i++) {
6084 serge 6103
		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5060 serge 6104
		if (!intel_dig_port)
6105
			continue;
6106
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6107
			int ret;
6108
 
6109
			if (!intel_dig_port->dp.can_mst)
6110
				continue;
6111
 
6112
			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6113
			if (ret != 0) {
6114
				intel_dp_check_mst_status(&intel_dig_port->dp);
6115
			}
6116
		}
6117
	}
6118
}