Subversion Repositories Kolibri OS

Rev

Rev 4539 | Rev 5060 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2008 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *    Keith Packard 
25
 *
26
 */
27
 
28
#include 
2330 Serge 29
#include 
3031 serge 30
#include 
31
#include 
32
#include 
33
#include 
34
#include 
2327 Serge 35
#include "intel_drv.h"
3031 serge 36
#include 
2327 Serge 37
#include "i915_drv.h"
38
 
39
#define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
40
 
4560 Serge 41
struct dp_link_dpll {
42
	int link_bw;
43
	struct dpll dpll;
44
};
45
 
46
static const struct dp_link_dpll gen4_dpll[] = {
47
	{ DP_LINK_BW_1_62,
48
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
49
	{ DP_LINK_BW_2_7,
50
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
51
};
52
 
53
static const struct dp_link_dpll pch_dpll[] = {
54
	{ DP_LINK_BW_1_62,
55
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
56
	{ DP_LINK_BW_2_7,
57
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
58
};
59
 
60
static const struct dp_link_dpll vlv_dpll[] = {
61
	{ DP_LINK_BW_1_62,
62
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
63
	{ DP_LINK_BW_2_7,
64
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
65
};
66
 
2327 Serge 67
/**
68
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
69
 * @intel_dp: DP struct
70
 *
71
 * If a CPU or PCH DP output is attached to an eDP panel, this function
72
 * will return true, and false otherwise.
73
 */
74
static bool is_edp(struct intel_dp *intel_dp)
75
{
3243 Serge 76
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
77
 
78
	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
2327 Serge 79
}
80
 
3243 Serge 81
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
2327 Serge 82
{
3243 Serge 83
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
84
 
85
	return intel_dig_port->base.base.dev;
2327 Serge 86
}
87
 
2330 Serge 88
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
89
{
3243 Serge 90
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2330 Serge 91
}
2327 Serge 92
 
2330 Serge 93
static void intel_dp_link_down(struct intel_dp *intel_dp);
94
 
95
static int
96
intel_dp_max_link_bw(struct intel_dp *intel_dp)
97
{
98
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
2327 Serge 99
 
2330 Serge 100
	switch (max_link_bw) {
101
	case DP_LINK_BW_1_62:
102
	case DP_LINK_BW_2_7:
103
		break;
4104 Serge 104
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
105
		max_link_bw = DP_LINK_BW_2_7;
106
		break;
2330 Serge 107
	default:
4104 Serge 108
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
109
		     max_link_bw);
2330 Serge 110
		max_link_bw = DP_LINK_BW_1_62;
111
		break;
112
	}
113
	return max_link_bw;
114
}
2327 Serge 115
 
2342 Serge 116
/*
117
 * The units on the numbers in the next two are... bizarre.  Examples will
118
 * make it clearer; this one parallels an example in the eDP spec.
119
 *
120
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
121
 *
122
 *     270000 * 1 * 8 / 10 == 216000
123
 *
124
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
125
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
126
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
127
 * 119000.  At 18bpp that's 2142000 kilobits per second.
128
 *
129
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
130
 * get the result in decakilobits instead of kilobits.
131
 */
132
 
2330 Serge 133
static int
2351 Serge 134
intel_dp_link_required(int pixel_clock, int bpp)
2330 Serge 135
{
2342 Serge 136
	return (pixel_clock * bpp + 9) / 10;
2330 Serge 137
}
2327 Serge 138
 
2330 Serge 139
static int
140
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
141
{
142
	return (max_link_clock * max_lanes * 8) / 10;
143
}
2327 Serge 144
 
4560 Serge 145
static enum drm_mode_status
2330 Serge 146
intel_dp_mode_valid(struct drm_connector *connector,
147
		    struct drm_display_mode *mode)
148
{
149
	struct intel_dp *intel_dp = intel_attached_dp(connector);
3243 Serge 150
	struct intel_connector *intel_connector = to_intel_connector(connector);
151
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
3746 Serge 152
	int target_clock = mode->clock;
153
	int max_rate, mode_rate, max_lanes, max_link_clock;
2327 Serge 154
 
3243 Serge 155
	if (is_edp(intel_dp) && fixed_mode) {
156
		if (mode->hdisplay > fixed_mode->hdisplay)
2330 Serge 157
			return MODE_PANEL;
2327 Serge 158
 
3243 Serge 159
		if (mode->vdisplay > fixed_mode->vdisplay)
2330 Serge 160
			return MODE_PANEL;
3746 Serge 161
 
162
		target_clock = fixed_mode->clock;
2330 Serge 163
	}
2327 Serge 164
 
3746 Serge 165
	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
166
	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
167
 
168
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
169
	mode_rate = intel_dp_link_required(target_clock, 18);
170
 
171
	if (mode_rate > max_rate)
2330 Serge 172
		return MODE_CLOCK_HIGH;
2327 Serge 173
 
2330 Serge 174
	if (mode->clock < 10000)
175
		return MODE_CLOCK_LOW;
176
 
3031 serge 177
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
178
		return MODE_H_ILLEGAL;
179
 
2330 Serge 180
	return MODE_OK;
181
}
182
 
183
static uint32_t
184
pack_aux(uint8_t *src, int src_bytes)
185
{
186
	int	i;
187
	uint32_t v = 0;
188
 
189
	if (src_bytes > 4)
190
		src_bytes = 4;
191
	for (i = 0; i < src_bytes; i++)
192
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
193
	return v;
194
}
195
 
196
static void
197
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
198
{
199
	int i;
200
	if (dst_bytes > 4)
201
		dst_bytes = 4;
202
	for (i = 0; i < dst_bytes; i++)
203
		dst[i] = src >> ((3-i) * 8);
204
}
205
 
206
/* hrawclock is 1/4 the FSB frequency */
207
static int
208
intel_hrawclk(struct drm_device *dev)
209
{
210
	struct drm_i915_private *dev_priv = dev->dev_private;
211
	uint32_t clkcfg;
212
 
3243 Serge 213
	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
214
	if (IS_VALLEYVIEW(dev))
215
		return 200;
216
 
2330 Serge 217
	clkcfg = I915_READ(CLKCFG);
218
	switch (clkcfg & CLKCFG_FSB_MASK) {
219
	case CLKCFG_FSB_400:
220
		return 100;
221
	case CLKCFG_FSB_533:
222
		return 133;
223
	case CLKCFG_FSB_667:
224
		return 166;
225
	case CLKCFG_FSB_800:
226
		return 200;
227
	case CLKCFG_FSB_1067:
228
		return 266;
229
	case CLKCFG_FSB_1333:
230
		return 333;
231
	/* these two are just a guess; one of them might be right */
232
	case CLKCFG_FSB_1600:
233
	case CLKCFG_FSB_1600_ALT:
234
		return 400;
235
	default:
236
		return 133;
237
	}
238
}
239
 
4560 Serge 240
static void
241
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
242
				    struct intel_dp *intel_dp,
243
				    struct edp_power_seq *out);
244
static void
245
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
246
					      struct intel_dp *intel_dp,
247
					      struct edp_power_seq *out);
248
 
249
static enum pipe
250
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
251
{
252
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
253
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
254
	struct drm_device *dev = intel_dig_port->base.base.dev;
255
	struct drm_i915_private *dev_priv = dev->dev_private;
256
	enum port port = intel_dig_port->port;
257
	enum pipe pipe;
258
 
259
	/* modeset should have pipe */
260
	if (crtc)
261
		return to_intel_crtc(crtc)->pipe;
262
 
263
	/* init time, try to find a pipe with this port selected */
264
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
265
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
266
			PANEL_PORT_SELECT_MASK;
267
		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
268
			return pipe;
269
		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
270
			return pipe;
271
	}
272
 
273
	/* shrug */
274
	return PIPE_A;
275
}
276
 
277
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
278
{
279
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
280
 
281
	if (HAS_PCH_SPLIT(dev))
282
		return PCH_PP_CONTROL;
283
	else
284
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
285
}
286
 
287
static u32 _pp_stat_reg(struct intel_dp *intel_dp)
288
{
289
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
290
 
291
	if (HAS_PCH_SPLIT(dev))
292
		return PCH_PP_STATUS;
293
	else
294
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
295
}
296
 
2342 Serge 297
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
298
{
3243 Serge 299
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2342 Serge 300
	struct drm_i915_private *dev_priv = dev->dev_private;
301
 
4560 Serge 302
	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
2342 Serge 303
}
304
 
305
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
306
{
3243 Serge 307
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2342 Serge 308
	struct drm_i915_private *dev_priv = dev->dev_private;
309
 
4560 Serge 310
	return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
2342 Serge 311
}
312
 
313
static void
314
intel_dp_check_edp(struct intel_dp *intel_dp)
315
{
3243 Serge 316
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2342 Serge 317
	struct drm_i915_private *dev_priv = dev->dev_private;
318
 
319
	if (!is_edp(intel_dp))
320
		return;
3746 Serge 321
 
2342 Serge 322
	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
323
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
324
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
4560 Serge 325
			      I915_READ(_pp_stat_reg(intel_dp)),
326
			      I915_READ(_pp_ctrl_reg(intel_dp)));
2342 Serge 327
	}
328
}
329
 
3480 Serge 330
static uint32_t
331
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
332
{
333
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
334
	struct drm_device *dev = intel_dig_port->base.base.dev;
335
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 336
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
3480 Serge 337
	uint32_t status;
338
	bool done;
339
 
340
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
341
	if (has_aux_irq)
342
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
4104 Serge 343
					  msecs_to_jiffies_timeout(10));
3480 Serge 344
	else
345
		done = wait_for_atomic(C, 10) == 0;
346
	if (!done)
347
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
348
			  has_aux_irq);
349
#undef C
350
 
351
	return status;
352
}
353
 
4104 Serge 354
static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
355
				      int index)
356
{
357
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
358
	struct drm_device *dev = intel_dig_port->base.base.dev;
359
	struct drm_i915_private *dev_priv = dev->dev_private;
360
 
361
	/* The clock divider is based off the hrawclk,
362
	 * and would like to run at 2MHz. So, take the
363
	 * hrawclk value and divide by 2 and use that
364
	 *
365
	 * Note that PCH attached eDP panels should use a 125MHz input
366
	 * clock divider.
367
	 */
368
	if (IS_VALLEYVIEW(dev)) {
369
		return index ? 0 : 100;
370
	} else if (intel_dig_port->port == PORT_A) {
371
		if (index)
372
			return 0;
373
		if (HAS_DDI(dev))
374
			return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
375
		else if (IS_GEN6(dev) || IS_GEN7(dev))
376
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
377
		else
378
			return 225; /* eDP input clock at 450Mhz */
379
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
380
		/* Workaround for non-ULT HSW */
381
		switch (index) {
382
		case 0: return 63;
383
		case 1: return 72;
384
		default: return 0;
385
		}
386
	} else if (HAS_PCH_SPLIT(dev)) {
387
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
388
	} else {
389
		return index ? 0 :intel_hrawclk(dev) / 2;
390
	}
391
}
392
 
2330 Serge 393
static int
394
intel_dp_aux_ch(struct intel_dp *intel_dp,
395
		uint8_t *send, int send_bytes,
396
		uint8_t *recv, int recv_size)
397
{
3243 Serge 398
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
399
	struct drm_device *dev = intel_dig_port->base.base.dev;
2330 Serge 400
	struct drm_i915_private *dev_priv = dev->dev_private;
3746 Serge 401
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
2330 Serge 402
	uint32_t ch_data = ch_ctl + 4;
4104 Serge 403
	uint32_t aux_clock_divider;
3480 Serge 404
	int i, ret, recv_bytes;
2330 Serge 405
	uint32_t status;
4104 Serge 406
	int try, precharge, clock = 0;
4560 Serge 407
	bool has_aux_irq = true;
408
	uint32_t timeout;
2330 Serge 409
 
3480 Serge 410
	/* dp aux is extremely sensitive to irq latency, hence request the
411
	 * lowest possible wakeup latency and so prevent the cpu from going into
412
	 * deep sleep states.
413
	 */
414
//	pm_qos_update_request(&dev_priv->pm_qos, 0);
415
 
2342 Serge 416
	intel_dp_check_edp(intel_dp);
2330 Serge 417
 
418
	if (IS_GEN6(dev))
419
		precharge = 3;
420
	else
421
		precharge = 5;
422
 
4560 Serge 423
	if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL)
424
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
425
	else
426
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
427
 
4104 Serge 428
	intel_aux_display_runtime_get(dev_priv);
429
 
2330 Serge 430
	/* Try to wait for any previous AUX channel activity */
431
	for (try = 0; try < 3; try++) {
3480 Serge 432
		status = I915_READ_NOTRACE(ch_ctl);
2330 Serge 433
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
434
			break;
435
		msleep(1);
436
	}
437
 
438
	if (try == 3) {
439
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
440
		     I915_READ(ch_ctl));
3480 Serge 441
		ret = -EBUSY;
442
		goto out;
2330 Serge 443
	}
444
 
4560 Serge 445
	/* Only 5 data registers! */
446
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
447
		ret = -E2BIG;
448
		goto out;
449
	}
450
 
4104 Serge 451
	while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
2330 Serge 452
	/* Must try at least 3 times according to DP spec */
453
	for (try = 0; try < 5; try++) {
454
		/* Load the send data into the aux channel data registers */
455
		for (i = 0; i < send_bytes; i += 4)
456
			I915_WRITE(ch_data + i,
457
				   pack_aux(send + i, send_bytes - i));
458
 
459
		/* Send the command and wait for it to complete */
460
		I915_WRITE(ch_ctl,
461
			   DP_AUX_CH_CTL_SEND_BUSY |
3480 Serge 462
			   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
4560 Serge 463
				   timeout |
2330 Serge 464
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
465
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
466
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
467
			   DP_AUX_CH_CTL_DONE |
468
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
469
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
470
 
3480 Serge 471
		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
472
 
2330 Serge 473
		/* Clear done status and any errors */
474
		I915_WRITE(ch_ctl,
475
			   status |
476
			   DP_AUX_CH_CTL_DONE |
477
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
478
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
3031 serge 479
 
480
		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
481
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
482
			continue;
2330 Serge 483
		if (status & DP_AUX_CH_CTL_DONE)
484
			break;
485
	}
4104 Serge 486
		if (status & DP_AUX_CH_CTL_DONE)
487
			break;
488
	}
2330 Serge 489
 
490
	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
491
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
3480 Serge 492
		ret = -EBUSY;
493
		goto out;
2330 Serge 494
	}
495
 
496
	/* Check for timeout or receive error.
497
	 * Timeouts occur when the sink is not connected
498
	 */
499
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
500
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
3480 Serge 501
		ret = -EIO;
502
		goto out;
2330 Serge 503
	}
504
 
505
	/* Timeouts occur when the device isn't connected, so they're
506
	 * "normal" -- don't fill the kernel log with these */
507
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
508
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
3480 Serge 509
		ret = -ETIMEDOUT;
510
		goto out;
2330 Serge 511
	}
512
 
513
	/* Unload any bytes sent back from the other side */
514
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
515
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
516
	if (recv_bytes > recv_size)
517
		recv_bytes = recv_size;
518
 
519
	for (i = 0; i < recv_bytes; i += 4)
520
		unpack_aux(I915_READ(ch_data + i),
521
			   recv + i, recv_bytes - i);
522
 
3480 Serge 523
	ret = recv_bytes;
524
out:
525
//	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
4104 Serge 526
	intel_aux_display_runtime_put(dev_priv);
3480 Serge 527
 
528
	return ret;
2330 Serge 529
}
530
 
531
/* Write data to the aux channel in native mode */
532
static int
533
intel_dp_aux_native_write(struct intel_dp *intel_dp,
534
			  uint16_t address, uint8_t *send, int send_bytes)
535
{
536
	int ret;
537
	uint8_t	msg[20];
538
	int msg_bytes;
539
	uint8_t	ack;
540
 
4560 Serge 541
	if (WARN_ON(send_bytes > 16))
542
		return -E2BIG;
543
 
2342 Serge 544
	intel_dp_check_edp(intel_dp);
4560 Serge 545
	msg[0] = DP_AUX_NATIVE_WRITE << 4;
2330 Serge 546
	msg[1] = address >> 8;
547
	msg[2] = address & 0xff;
548
	msg[3] = send_bytes - 1;
549
	memcpy(&msg[4], send, send_bytes);
550
	msg_bytes = send_bytes + 4;
551
	for (;;) {
552
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
553
		if (ret < 0)
554
			return ret;
4560 Serge 555
		ack >>= 4;
556
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
2330 Serge 557
			break;
4560 Serge 558
		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
2330 Serge 559
			udelay(100);
560
		else
561
			return -EIO;
562
	}
563
	return send_bytes;
564
}
565
 
566
/* Write a single byte to the aux channel in native mode */
567
static int
568
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
569
			    uint16_t address, uint8_t byte)
570
{
571
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
572
}
573
 
574
/* read bytes from a native aux channel */
575
static int
576
intel_dp_aux_native_read(struct intel_dp *intel_dp,
577
			 uint16_t address, uint8_t *recv, int recv_bytes)
578
{
579
	uint8_t msg[4];
580
	int msg_bytes;
581
	uint8_t reply[20];
582
	int reply_bytes;
583
	uint8_t ack;
584
	int ret;
585
 
4560 Serge 586
	if (WARN_ON(recv_bytes > 19))
587
		return -E2BIG;
588
 
2342 Serge 589
	intel_dp_check_edp(intel_dp);
4560 Serge 590
	msg[0] = DP_AUX_NATIVE_READ << 4;
2330 Serge 591
	msg[1] = address >> 8;
592
	msg[2] = address & 0xff;
593
	msg[3] = recv_bytes - 1;
594
 
595
	msg_bytes = 4;
596
	reply_bytes = recv_bytes + 1;
597
 
598
	for (;;) {
599
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
600
				      reply, reply_bytes);
601
		if (ret == 0)
602
			return -EPROTO;
603
		if (ret < 0)
604
			return ret;
4560 Serge 605
		ack = reply[0] >> 4;
606
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
2330 Serge 607
			memcpy(recv, reply + 1, ret - 1);
608
			return ret - 1;
609
		}
4560 Serge 610
		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
2330 Serge 611
			udelay(100);
612
		else
613
			return -EIO;
614
	}
615
}
616
 
617
static int
618
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
619
		    uint8_t write_byte, uint8_t *read_byte)
620
{
621
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
622
	struct intel_dp *intel_dp = container_of(adapter,
623
						struct intel_dp,
624
						adapter);
625
	uint16_t address = algo_data->address;
626
	uint8_t msg[5];
627
	uint8_t reply[2];
628
	unsigned retry;
629
	int msg_bytes;
630
	int reply_bytes;
631
	int ret;
632
 
4560 Serge 633
	ironlake_edp_panel_vdd_on(intel_dp);
2342 Serge 634
	intel_dp_check_edp(intel_dp);
2330 Serge 635
	/* Set up the command byte */
636
	if (mode & MODE_I2C_READ)
4560 Serge 637
		msg[0] = DP_AUX_I2C_READ << 4;
2330 Serge 638
	else
4560 Serge 639
		msg[0] = DP_AUX_I2C_WRITE << 4;
2330 Serge 640
 
641
	if (!(mode & MODE_I2C_STOP))
4560 Serge 642
		msg[0] |= DP_AUX_I2C_MOT << 4;
2330 Serge 643
 
644
	msg[1] = address >> 8;
645
	msg[2] = address;
646
 
647
	switch (mode) {
648
	case MODE_I2C_WRITE:
649
		msg[3] = 0;
650
		msg[4] = write_byte;
651
		msg_bytes = 5;
652
		reply_bytes = 1;
653
		break;
654
	case MODE_I2C_READ:
655
		msg[3] = 0;
656
		msg_bytes = 4;
657
		reply_bytes = 2;
658
		break;
659
	default:
660
		msg_bytes = 3;
661
		reply_bytes = 1;
662
		break;
663
	}
664
 
4560 Serge 665
	/*
666
	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
667
	 * required to retry at least seven times upon receiving AUX_DEFER
668
	 * before giving up the AUX transaction.
669
	 */
670
	for (retry = 0; retry < 7; retry++) {
2330 Serge 671
		ret = intel_dp_aux_ch(intel_dp,
672
				      msg, msg_bytes,
673
				      reply, reply_bytes);
674
		if (ret < 0) {
675
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
4560 Serge 676
			goto out;
2330 Serge 677
		}
678
 
4560 Serge 679
		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
680
		case DP_AUX_NATIVE_REPLY_ACK:
2330 Serge 681
			/* I2C-over-AUX Reply field is only valid
682
			 * when paired with AUX ACK.
683
			 */
684
			break;
4560 Serge 685
		case DP_AUX_NATIVE_REPLY_NACK:
2330 Serge 686
			DRM_DEBUG_KMS("aux_ch native nack\n");
4560 Serge 687
			ret = -EREMOTEIO;
688
			goto out;
689
		case DP_AUX_NATIVE_REPLY_DEFER:
690
			/*
691
			 * For now, just give more slack to branch devices. We
692
			 * could check the DPCD for I2C bit rate capabilities,
693
			 * and if available, adjust the interval. We could also
694
			 * be more careful with DP-to-Legacy adapters where a
695
			 * long legacy cable may force very low I2C bit rates.
696
			 */
697
            udelay(400);
2330 Serge 698
			continue;
699
		default:
700
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
701
				  reply[0]);
4560 Serge 702
			ret = -EREMOTEIO;
703
			goto out;
2330 Serge 704
		}
705
 
4560 Serge 706
		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
707
		case DP_AUX_I2C_REPLY_ACK:
2330 Serge 708
			if (mode == MODE_I2C_READ) {
709
				*read_byte = reply[1];
710
			}
4560 Serge 711
			ret = reply_bytes - 1;
712
			goto out;
713
		case DP_AUX_I2C_REPLY_NACK:
2330 Serge 714
			DRM_DEBUG_KMS("aux_i2c nack\n");
4560 Serge 715
			ret = -EREMOTEIO;
716
			goto out;
717
		case DP_AUX_I2C_REPLY_DEFER:
2330 Serge 718
			DRM_DEBUG_KMS("aux_i2c defer\n");
719
			udelay(100);
720
			break;
721
		default:
722
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
4560 Serge 723
			ret = -EREMOTEIO;
724
			goto out;
2330 Serge 725
		}
726
	}
727
 
728
	DRM_ERROR("too many retries, giving up\n");
4560 Serge 729
	ret = -EREMOTEIO;
730
 
731
out:
732
	ironlake_edp_panel_vdd_off(intel_dp, false);
733
	return ret;
2330 Serge 734
}
735
 
736
static int
737
intel_dp_i2c_init(struct intel_dp *intel_dp,
738
		  struct intel_connector *intel_connector, const char *name)
739
{
2342 Serge 740
	int	ret;
741
 
2330 Serge 742
	DRM_DEBUG_KMS("i2c_init %s\n", name);
743
	intel_dp->algo.running = false;
744
	intel_dp->algo.address = 0;
745
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
746
 
2342 Serge 747
	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
3031 serge 748
	intel_dp->adapter.owner = THIS_MODULE;
2330 Serge 749
	intel_dp->adapter.class = I2C_CLASS_DDC;
2342 Serge 750
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
2330 Serge 751
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
752
	intel_dp->adapter.algo_data = &intel_dp->algo;
4560 Serge 753
	intel_dp->adapter.dev.parent = intel_connector->base.kdev;
2330 Serge 754
 
2342 Serge 755
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
756
	return ret;
2330 Serge 757
}
758
 
4104 Serge 759
static void
760
intel_dp_set_clock(struct intel_encoder *encoder,
761
		   struct intel_crtc_config *pipe_config, int link_bw)
762
{
763
	struct drm_device *dev = encoder->base.dev;
4560 Serge 764
	const struct dp_link_dpll *divisor = NULL;
765
	int i, count = 0;
4104 Serge 766
 
767
	if (IS_G4X(dev)) {
4560 Serge 768
		divisor = gen4_dpll;
769
		count = ARRAY_SIZE(gen4_dpll);
4104 Serge 770
	} else if (IS_HASWELL(dev)) {
771
		/* Haswell has special-purpose DP DDI clocks. */
772
	} else if (HAS_PCH_SPLIT(dev)) {
4560 Serge 773
		divisor = pch_dpll;
774
		count = ARRAY_SIZE(pch_dpll);
775
	} else if (IS_VALLEYVIEW(dev)) {
776
		divisor = vlv_dpll;
777
		count = ARRAY_SIZE(vlv_dpll);
4104 Serge 778
		}
4560 Serge 779
 
780
	if (divisor && count) {
781
		for (i = 0; i < count; i++) {
782
			if (link_bw == divisor[i].link_bw) {
783
				pipe_config->dpll = divisor[i].dpll;
4104 Serge 784
		pipe_config->clock_set = true;
4560 Serge 785
				break;
786
			}
787
		}
4104 Serge 788
	}
789
}
790
 
3243 Serge 791
bool
3746 Serge 792
intel_dp_compute_config(struct intel_encoder *encoder,
793
			struct intel_crtc_config *pipe_config)
2330 Serge 794
{
3746 Serge 795
	struct drm_device *dev = encoder->base.dev;
796
	struct drm_i915_private *dev_priv = dev->dev_private;
797
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
798
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4104 Serge 799
	enum port port = dp_to_dig_port(intel_dp)->port;
800
	struct intel_crtc *intel_crtc = encoder->new_crtc;
3243 Serge 801
	struct intel_connector *intel_connector = intel_dp->attached_connector;
2330 Serge 802
	int lane_count, clock;
3243 Serge 803
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
2330 Serge 804
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
3031 serge 805
	int bpp, mode_rate;
2330 Serge 806
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
4104 Serge 807
	int link_avail, link_clock;
2330 Serge 808
 
4104 Serge 809
	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
3746 Serge 810
		pipe_config->has_pch_encoder = true;
811
 
812
	pipe_config->has_dp_encoder = true;
813
 
3243 Serge 814
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
815
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
816
				       adjusted_mode);
4104 Serge 817
		if (!HAS_PCH_SPLIT(dev))
818
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
819
						 intel_connector->panel.fitting_mode);
820
		else
821
			intel_pch_panel_fitting(intel_crtc, pipe_config,
822
						intel_connector->panel.fitting_mode);
2330 Serge 823
	}
824
 
3031 serge 825
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
826
		return false;
827
 
828
	DRM_DEBUG_KMS("DP link computation with max lane count %i "
829
		      "max bw %02x pixel clock %iKHz\n",
4560 Serge 830
		      max_lane_count, bws[max_clock],
831
		      adjusted_mode->crtc_clock);
3031 serge 832
 
3746 Serge 833
	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
834
	 * bpc in between. */
4104 Serge 835
	bpp = pipe_config->pipe_bpp;
4560 Serge 836
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
837
	    dev_priv->vbt.edp_bpp < bpp) {
4104 Serge 838
		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
839
			      dev_priv->vbt.edp_bpp);
4560 Serge 840
		bpp = dev_priv->vbt.edp_bpp;
4104 Serge 841
	}
3746 Serge 842
 
843
	for (; bpp >= 6*3; bpp -= 2*3) {
4560 Serge 844
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
845
						   bpp);
3746 Serge 846
 
847
		for (clock = 0; clock <= max_clock; clock++) {
848
			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
849
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
850
				link_avail = intel_dp_max_data_rate(link_clock,
851
								    lane_count);
852
 
853
				if (mode_rate <= link_avail) {
854
					goto found;
855
				}
856
			}
857
		}
858
	}
859
 
3031 serge 860
		return false;
861
 
3746 Serge 862
found:
3480 Serge 863
	if (intel_dp->color_range_auto) {
864
		/*
865
		 * See:
866
		 * CEA-861-E - 5.1 Default Encoding Parameters
867
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
868
		 */
869
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
870
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
871
		else
872
			intel_dp->color_range = 0;
873
	}
874
 
875
	if (intel_dp->color_range)
3746 Serge 876
		pipe_config->limited_color_range = true;
3480 Serge 877
 
2330 Serge 878
				intel_dp->link_bw = bws[clock];
879
				intel_dp->lane_count = lane_count;
3746 Serge 880
	pipe_config->pipe_bpp = bpp;
4104 Serge 881
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
3746 Serge 882
 
883
	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
2330 Serge 884
				       intel_dp->link_bw, intel_dp->lane_count,
4104 Serge 885
		      pipe_config->port_clock, bpp);
3031 serge 886
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
887
					      mode_rate, link_avail);
2330 Serge 888
 
3746 Serge 889
	intel_link_compute_m_n(bpp, lane_count,
4560 Serge 890
			       adjusted_mode->crtc_clock,
891
			       pipe_config->port_clock,
3746 Serge 892
			       &pipe_config->dp_m_n);
2330 Serge 893
 
4104 Serge 894
	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
895
 
3746 Serge 896
	return true;
2327 Serge 897
}
898
 
4104 Serge 899
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
3480 Serge 900
{
4104 Serge 901
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
902
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
903
	struct drm_device *dev = crtc->base.dev;
3480 Serge 904
	struct drm_i915_private *dev_priv = dev->dev_private;
905
	u32 dpa_ctl;
906
 
4104 Serge 907
	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
3480 Serge 908
	dpa_ctl = I915_READ(DP_A);
909
	dpa_ctl &= ~DP_PLL_FREQ_MASK;
910
 
4104 Serge 911
	if (crtc->config.port_clock == 162000) {
3480 Serge 912
		/* For a long time we've carried around a ILK-DevA w/a for the
913
		 * 160MHz clock. If we're really unlucky, it's still required.
914
		 */
915
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
916
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
4104 Serge 917
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
3480 Serge 918
	} else {
919
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
4104 Serge 920
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
3480 Serge 921
	}
922
 
923
	I915_WRITE(DP_A, dpa_ctl);
924
 
925
	POSTING_READ(DP_A);
926
	udelay(500);
927
}
928
 
4104 Serge 929
/*
 * Build the DP port register value (intel_dp->DP) for the current mode.
 * Handles the per-platform register layout differences (IBX PCH / SNB CPU /
 * IVB CPU / CPT PCH) and kicks the CPU eDP PLL where that is configured
 * through this register.
 */
static void intel_dp_mode_set(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(&encoder->base, adjusted_mode);
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU eDP: CPT-style link-train bits plus pipe select at bit 29 */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		/* IBX PCH / CPU layout */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;
	} else {
		/* CPT PCH: most routing lives in TRANS_DP_CTL instead */
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	/* CPU eDP (PLL lives in this register) needs the PLL set up here. */
	if (port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_cpu_edp(intel_dp);
}
2327 Serge 1006
 
2342 Serge 1007
/* Panel power-sequencer status mask/value pairs used by the wait helpers. */
#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

/*
 * Poll the panel power status register until (status & mask) == value,
 * giving the sequencer up to 5 seconds (10us poll interval). Logs an error
 * on timeout but does not propagate it -- callers proceed regardless.
 */
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
1040
 
1041
/* Block until the panel power sequencer reports the panel fully on. */
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1046
 
1047
/* Block until the panel power sequencer reports the panel fully off. */
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1052
 
1053
/* Block until the mandatory panel power-cycle delay has elapsed. */
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1058
 
1059
 
1060
/* Read the current pp_control value, unlocking the register if it
1061
 * is locked
1062
 */
1063
 
3746 Serge 1064
static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
2342 Serge 1065
{
3746 Serge 1066
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1067
	struct drm_i915_private *dev_priv = dev->dev_private;
1068
	u32 control;
2342 Serge 1069
 
4560 Serge 1070
	control = I915_READ(_pp_ctrl_reg(intel_dp));
2342 Serge 1071
	control &= ~PANEL_UNLOCK_MASK;
1072
	control |= PANEL_UNLOCK_REGS;
1073
	return control;
1074
}
1075
 
3243 Serge 1076
/*
 * Force the eDP panel VDD rail on so the AUX channel can be used before
 * full panel power-up. Sets want_panel_vdd; must be balanced by a later
 * ironlake_edp_panel_vdd_off(). No-op on non-eDP ports. Takes a runtime-PM
 * reference that the matching vdd-off path releases.
 */
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	/* VDD already forced on (e.g. by the delayed-off work not having
	 * fired yet) -- nothing more to do. */
	if (ironlake_edp_have_panel_vdd(intel_dp))
		return;

	intel_runtime_pm_get(dev_priv);

	DRM_DEBUG_KMS("Turning eDP VDD on\n");

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}
2327 Serge 1119
 
2342 Serge 1120
/*
 * Synchronously drop the forced VDD rail if nobody wants it anymore.
 * Caller must hold mode_config.mutex. Releases the runtime-PM reference
 * taken by ironlake_edp_panel_vdd_on().
 */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("Turning eDP VDD off\n");

		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		pp_stat_reg = _pp_stat_reg(intel_dp);

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

		/* Panel was off while VDD was forced: honour the power-cycle
		 * delay before anyone powers it up again. */
		if ((pp & POWER_TARGET_ON) == 0)
			msleep(intel_dp->panel_power_cycle_delay);

		intel_runtime_pm_put(dev_priv);
	}
}
2327 Serge 1151
 
3243 Serge 1152
/* Delayed-work callback: drop forced VDD once the grace period expires. */
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	/* vdd_off_sync requires the mode_config mutex. */
	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}
2342 Serge 1162
 
3243 Serge 1163
/*
 * Release a VDD reference taken by ironlake_edp_panel_vdd_on().
 * If @sync, the rail is dropped immediately; otherwise the drop is
 * deferred via delayed work so VDD stays up across a burst of AUX
 * transactions.
 */
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}
1184
 
3243 Serge 1185
/*
 * Turn full eDP panel power on via the power sequencer and wait for it to
 * report on-idle. No-op if the port is not eDP or power is already up.
 * Contains the ILK (gen5) workaround of dropping PANEL_POWER_RESET around
 * the power-on sequence and restoring it afterwards.
 */
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2327 Serge 1228
 
3243 Serge 1229
/*
 * Turn eDP panel power fully off (clearing power, reset and backlight
 * enable bits in one write) and wait for the sequencer to go off-idle.
 */
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	ironlake_wait_panel_off(intel_dp);
}
2327 Serge 1253
 
3243 Serge 1254
/*
 * Enable the eDP backlight: wait out the panel's backlight-on delay,
 * set EDP_BLC_ENABLE in the power sequencer, then hand over to the
 * generic panel backlight code.
 */
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(intel_dp->attached_connector);
}
2327 Serge 1283
 
3243 Serge 1284
/*
 * Disable the eDP backlight (generic panel code first, then the
 * EDP_BLC_ENABLE bit) and wait out the panel's backlight-off delay.
 */
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(intel_dp->attached_connector);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	msleep(intel_dp->backlight_off_delay);
}
2327 Serge 1306
 
3031 serge 1307
/*
 * Enable the CPU eDP PLL via DP_A. Must be called with the pipe disabled
 * and the port off; warns if either precondition is violated.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	/* PLL lock time. */
	udelay(200);
}
2327 Serge 1332
 
3031 serge 1333
/*
 * Disable the CPU eDP PLL via DP_A. Pipe and port must already be off;
 * warns otherwise. Writes the register directly instead of through the
 * cached intel_dp->DP (see inline comment).
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
2327 Serge 1357
 
2330 Serge 1358
/* If the sink supports it, try to set the power state appropriately */
3243 Serge 1359
/* If the sink supports it, try to set the power state appropriately */
/*
 * Write DP_SET_POWER over AUX: D3 for any non-ON dpms mode, D0 for ON.
 * Skipped on DPCD < 1.1 sinks which have no power-state register. The
 * D0 write is retried up to 3 times to let the sink wake up.
 */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}
2327 Serge 1387
 
3031 serge 1388
/*
 * Read back whether the DP port is enabled in hardware and, if so, which
 * pipe drives it. Returns false when the port is off. On CPT the pipe
 * must be recovered from the TRANS_DP_CTL port-select fields; if no
 * transcoder claims the port, *pipe is left untouched and true is still
 * returned (state checker will flag the inconsistency).
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		/* IVB CPU eDP encodes the pipe in the port register. */
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* CPT: find the transcoder whose port select matches us. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}
2327 Serge 1437
 
4104 Serge 1438
/*
 * Fill pipe_config from the current hardware state: sync polarity flags,
 * link m/n values, port clock (from the DP_A PLL select for CPU eDP) and
 * the derived dotclock. Also applies the VBT eDP bpp override hack
 * described inline.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		/* Polarity lives in the port register itself. */
		tmp = I915_READ(intel_dp->output_reg);
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		/* CPT: polarity was moved to TRANS_DP_CTL. */
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		/* CPU eDP: recover the link rate from the PLL frequency select. */
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
1514
 
4560 Serge 1515
static bool is_edp_psr(struct drm_device *dev)
4104 Serge 1516
{
4560 Serge 1517
	struct drm_i915_private *dev_priv = dev->dev_private;
1518
 
1519
	return dev_priv->psr.sink_support;
4104 Serge 1520
}
1521
 
1522
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1523
{
1524
	struct drm_i915_private *dev_priv = dev->dev_private;
1525
 
4560 Serge 1526
	if (!HAS_PSR(dev))
4104 Serge 1527
		return false;
1528
 
4560 Serge 1529
	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
4104 Serge 1530
}
1531
 
1532
/*
 * Program the VSC SDP (used for PSR signalling) into the video DIP data
 * registers, zero-padding the remainder of the DIP buffer, then enable
 * VSC DIP transmission.
 */
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
				    struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
	   the video DIP being updated before program video DIP data buffer
	   registers for DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}
1560
 
1561
/*
 * One-time PSR setup: send the VSC SDP header per the eDP 1.3 spec and
 * mask memory-up/hotplug/LPSP events so they do not continuously kick
 * the panel out of PSR. Idempotent via the psr_setup_done flag.
 */
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_vsc_psr psr_vsc;

	if (intel_dp->psr_setup_done)
		return;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

	intel_dp->psr_setup_done = true;
}
1584
 
1585
/*
 * Enable PSR on the sink via DPCD (with main-link-active chosen from the
 * sink's no-train-on-exit capability) and program the hardware AUX
 * registers PSR will use for its own DPCD writes.
 */
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
	int precharge = 0x3;
	int msg_size = 5;       /* Header(4) + Message(1) */

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE &
					    ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE |
					    DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers */
	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
	I915_WRITE(EDP_PSR_AUX_CTL(dev),
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
1612
 
1613
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1614
{
1615
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1616
	struct drm_i915_private *dev_priv = dev->dev_private;
1617
	uint32_t max_sleep_time = 0x1f;
1618
	uint32_t idle_frames = 1;
1619
	uint32_t val = 0x0;
4560 Serge 1620
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
4104 Serge 1621
 
1622
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
1623
		val |= EDP_PSR_LINK_STANDBY;
1624
		val |= EDP_PSR_TP2_TP3_TIME_0us;
1625
		val |= EDP_PSR_TP1_TIME_0us;
1626
		val |= EDP_PSR_SKIP_AUX_EXIT;
1627
	} else
1628
		val |= EDP_PSR_LINK_DISABLE;
1629
 
4560 Serge 1630
	I915_WRITE(EDP_PSR_CTL(dev), val |
1631
		   IS_BROADWELL(dev) ? 0 : link_entry_time |
4104 Serge 1632
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1633
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1634
		   EDP_PSR_ENABLE);
1635
}
1636
 
1637
/*
 * Check every precondition for entering PSR (platform support, module
 * flag, eDP on DDI A, active CRTC, X-tiled fenced fb, no sprite plane,
 * no stereo 3D, no interlace). Records the result in
 * dev_priv->psr.source_ok and returns it.
 */
static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;

	dev_priv->psr.source_ok = false;

	if (!HAS_PSR(dev)) {
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
		return false;
	}

	if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
	    (dig_port->port != PORT_A)) {
		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
		return false;
	}

	if (!i915_enable_psr) {
		DRM_DEBUG_KMS("PSR disable by flag\n");
		return false;
	}

	/* Re-fetch the crtc: it may have been detached since the
	 * initializer above dereferenced it. */
	crtc = dig_port->base.base.crtc;
	if (crtc == NULL) {
		DRM_DEBUG_KMS("crtc not active for PSR\n");
		return false;
	}

	intel_crtc = to_intel_crtc(crtc);
	if (!intel_crtc_active(crtc)) {
		DRM_DEBUG_KMS("crtc not active for PSR\n");
		return false;
	}

	/* PSR requires an X-tiled, fenced front buffer. */
	obj = to_intel_framebuffer(crtc->fb)->obj;
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
		return false;
	}

	if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
		return false;
	}

	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
	    S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		return false;
	}

	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		return false;
	}

	dev_priv->psr.source_ok = true;
	return true;
}
1703
 
1704
/*
 * Enable PSR end-to-end: one-time setup, then sink (DPCD) and source
 * (EDP_PSR_CTL) enabling. Bails out early when the PSR preconditions do
 * not hold or PSR is already active.
 */
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (!intel_edp_psr_match_conditions(intel_dp))
		return;
	if (intel_edp_is_psr_enabled(dev))
		return;

	/* Setup PSR once */
	intel_edp_psr_setup(intel_dp);

	/* Enable PSR on the panel */
	intel_edp_psr_enable_sink(intel_dp);

	/* Enable PSR on the host */
	intel_edp_psr_enable_source(intel_dp);
}
1721
 
1722
/* Public entry point: enable PSR if conditions hold and it is not on yet. */
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (!intel_edp_psr_match_conditions(intel_dp))
		return;
	if (intel_edp_is_psr_enabled(dev))
		return;

	intel_edp_psr_do_enable(intel_dp);
}
1730
 
1731
/*
 * Disable source-side PSR (clear EDP_PSR_CTL enable bit) and wait up to
 * 2 seconds for the PSR state machine to report idle. No-op when PSR is
 * not currently enabled.
 */
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!intel_edp_is_psr_enabled(dev))
		return;

	I915_WRITE(EDP_PSR_CTL(dev),
		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

	/* Wait till PSR is idle */
	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
		DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
1747
 
1748
/*
 * Re-evaluate PSR for the (single) eDP encoder: disable it if conditions
 * no longer hold, enable it if they do and it is currently off. Returns
 * early from the whole scan if the sink never advertised PSR.
 */
void intel_edp_psr_update(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_dp *intel_dp = NULL;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
		if (encoder->type == INTEL_OUTPUT_EDP) {
			intel_dp = enc_to_intel_dp(&encoder->base);

			if (!is_edp_psr(dev))
				return;

			if (!intel_edp_psr_match_conditions(intel_dp))
				intel_edp_psr_disable(intel_dp);
			else
				if (!intel_edp_is_psr_enabled(dev))
					intel_edp_psr_do_enable(intel_dp);
		}
}
1767
 
3031 serge 1768
/*
 * Encoder disable hook: backlight off, sink to D3, panel power off.
 * The link itself is only taken down here for PCH ports; CPU eDP and VLV
 * defer that to the post-disable hook (after the pipe is off).
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	ironlake_edp_panel_off(intel_dp);

	/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
		intel_dp_link_down(intel_dp);
}
2330 Serge 1784
 
3031 serge 1785
/*
 * Post-disable hook: runs after the pipe/plane is off.  Takes the link down
 * for the ports that could not be disabled earlier (CPU eDP on PORT_A and
 * all VLV ports) and shuts off the eDP PLL where one exists (not on VLV).
 */
static void intel_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	if (port == PORT_A || IS_VALLEYVIEW(dev)) {
		intel_dp_link_down(intel_dp);
		/* VLV has no ironlake-style eDP PLL to turn off. */
		if (!IS_VALLEYVIEW(dev))
		ironlake_edp_pll_off(intel_dp);
	}
}
1797
 
3031 serge 1798
static void intel_enable_dp(struct intel_encoder *encoder)
2330 Serge 1799
{
3031 serge 1800
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1801
	struct drm_device *dev = encoder->base.dev;
2330 Serge 1802
	struct drm_i915_private *dev_priv = dev->dev_private;
1803
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1804
 
3031 serge 1805
	if (WARN_ON(dp_reg & DP_PORT_EN))
1806
		return;
2342 Serge 1807
 
1808
		ironlake_edp_panel_vdd_on(intel_dp);
3031 serge 1809
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2330 Serge 1810
			intel_dp_start_link_train(intel_dp);
1811
				ironlake_edp_panel_on(intel_dp);
2342 Serge 1812
			ironlake_edp_panel_vdd_off(intel_dp, true);
2330 Serge 1813
			intel_dp_complete_link_train(intel_dp);
3746 Serge 1814
	intel_dp_stop_link_train(intel_dp);
4560 Serge 1815
}
1816
 
1817
static void g4x_enable_dp(struct intel_encoder *encoder)
1818
{
1819
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1820
 
1821
	intel_enable_dp(encoder);
2342 Serge 1822
		ironlake_edp_backlight_on(intel_dp);
2330 Serge 1823
}
1824
 
4104 Serge 1825
static void vlv_enable_dp(struct intel_encoder *encoder)
1826
{
4560 Serge 1827
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1828
 
1829
	ironlake_edp_backlight_on(intel_dp);
4104 Serge 1830
}
1831
 
4560 Serge 1832
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
3031 serge 1833
{
1834
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4104 Serge 1835
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3031 serge 1836
 
4104 Serge 1837
	if (dport->port == PORT_A)
3031 serge 1838
		ironlake_edp_pll_on(intel_dp);
1839
}
1840
 
4104 Serge 1841
/*
 * VLV pre-enable hook: program the DPIO PCS lanes for this pipe/channel,
 * initialize the panel power sequencer for this pipe/port, bring the port
 * up (training included) and wait until the PHY reports the port ready.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
		int pipe = intel_crtc->pipe;
	struct edp_power_seq power_seq;
		u32 val;

	mutex_lock(&dev_priv->dpio_lock);

	/* NOTE(review): the value read from VLV_PCS01_DW8 is immediately
	 * overwritten with 0 below, so the read result is discarded —
	 * presumably only the DPIO access itself matters here; confirm. */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
		val = 0;
		if (pipe)
			val |= (1<<21);
		else
			val &= ~(1<<21);
		val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->dpio_lock);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
						      &power_seq);

	intel_enable_dp(encoder);

	vlv_wait_port_ready(dev_priv, dport);
}
4104 Serge 1877
 
4560 Serge 1878
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
4104 Serge 1879
{
1880
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1881
	struct drm_device *dev = encoder->base.dev;
1882
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 1883
	struct intel_crtc *intel_crtc =
1884
		to_intel_crtc(encoder->base.crtc);
1885
	enum dpio_channel port = vlv_dport_to_channel(dport);
1886
	int pipe = intel_crtc->pipe;
4104 Serge 1887
 
1888
	/* Program Tx lane resets to default */
1889
	mutex_lock(&dev_priv->dpio_lock);
4560 Serge 1890
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
4104 Serge 1891
			 DPIO_PCS_TX_LANE2_RESET |
1892
			 DPIO_PCS_TX_LANE1_RESET);
4560 Serge 1893
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
4104 Serge 1894
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1895
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1896
			 (1<
1897
				 DPIO_PCS_CLK_SOFT_RESET);
1898
 
1899
	/* Fix up inter-pair skew failure */
4560 Serge 1900
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
1901
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
1902
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
4104 Serge 1903
	mutex_unlock(&dev_priv->dpio_lock);
1904
}
1905
 
2330 Serge 1906
/*
1907
 * Native read with retry for link status and receiver capability reads for
1908
 * cases where the sink may still be asleep.
1909
 */
1910
static bool
1911
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1912
			       uint8_t *recv, int recv_bytes)
1913
{
1914
	int ret, i;
1915
 
1916
	/*
1917
	 * Sinks are *supposed* to come up within 1ms from an off state,
1918
	 * but we're also supposed to retry 3 times per the spec.
1919
	 */
1920
	for (i = 0; i < 3; i++) {
1921
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
1922
					       recv_bytes);
1923
		if (ret == recv_bytes)
1924
			return true;
1925
		msleep(1);
1926
	}
1927
 
1928
	return false;
1929
}
1930
 
1931
/*
1932
 * Fetch AUX CH registers 0x202 - 0x207 which contain
1933
 * link status information
1934
 */
1935
static bool
2342 Serge 1936
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2330 Serge 1937
{
1938
	return intel_dp_aux_native_read_retry(intel_dp,
1939
					      DP_LANE0_1_STATUS,
2342 Serge 1940
					      link_status,
2330 Serge 1941
					      DP_LINK_STATUS_SIZE);
1942
}
1943
 
1944
/*
1945
 * These are source-specific values; current Intel hardware supports
1946
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1947
 */
1948
 
1949
/*
 * Maximum source-side voltage swing for this platform/port; used to clamp
 * the sink's adjustment requests in intel_get_adjust_train().  The branch
 * order matters: platform checks take precedence over the port check.
 */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else if (IS_GEN7(dev) && port == PORT_A)	/* IVB CPU eDP */
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)	/* CPT PCH ports */
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else
		return DP_TRAIN_VOLTAGE_SWING_800;
}
1964
 
1965
static uint8_t
1966
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1967
{
3243 Serge 1968
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
4104 Serge 1969
	enum port port = dp_to_dig_port(intel_dp)->port;
2342 Serge 1970
 
4560 Serge 1971
	if (IS_BROADWELL(dev)) {
2342 Serge 1972
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1973
		case DP_TRAIN_VOLTAGE_SWING_400:
4560 Serge 1974
		case DP_TRAIN_VOLTAGE_SWING_600:
1975
			return DP_TRAIN_PRE_EMPHASIS_6;
1976
		case DP_TRAIN_VOLTAGE_SWING_800:
1977
			return DP_TRAIN_PRE_EMPHASIS_3_5;
1978
		case DP_TRAIN_VOLTAGE_SWING_1200:
1979
		default:
1980
			return DP_TRAIN_PRE_EMPHASIS_0;
1981
		}
1982
	} else if (IS_HASWELL(dev)) {
1983
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1984
		case DP_TRAIN_VOLTAGE_SWING_400:
3243 Serge 1985
			return DP_TRAIN_PRE_EMPHASIS_9_5;
1986
		case DP_TRAIN_VOLTAGE_SWING_600:
2342 Serge 1987
			return DP_TRAIN_PRE_EMPHASIS_6;
3243 Serge 1988
		case DP_TRAIN_VOLTAGE_SWING_800:
1989
			return DP_TRAIN_PRE_EMPHASIS_3_5;
1990
		case DP_TRAIN_VOLTAGE_SWING_1200:
1991
		default:
1992
			return DP_TRAIN_PRE_EMPHASIS_0;
1993
		}
4104 Serge 1994
	} else if (IS_VALLEYVIEW(dev)) {
3243 Serge 1995
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1996
		case DP_TRAIN_VOLTAGE_SWING_400:
4104 Serge 1997
			return DP_TRAIN_PRE_EMPHASIS_9_5;
1998
		case DP_TRAIN_VOLTAGE_SWING_600:
3243 Serge 1999
			return DP_TRAIN_PRE_EMPHASIS_6;
4104 Serge 2000
		case DP_TRAIN_VOLTAGE_SWING_800:
2001
			return DP_TRAIN_PRE_EMPHASIS_3_5;
2002
		case DP_TRAIN_VOLTAGE_SWING_1200:
2003
		default:
2004
			return DP_TRAIN_PRE_EMPHASIS_0;
2005
		}
2006
	} else if (IS_GEN7(dev) && port == PORT_A) {
2007
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2008
		case DP_TRAIN_VOLTAGE_SWING_400:
2009
			return DP_TRAIN_PRE_EMPHASIS_6;
2342 Serge 2010
		case DP_TRAIN_VOLTAGE_SWING_600:
2011
		case DP_TRAIN_VOLTAGE_SWING_800:
2012
			return DP_TRAIN_PRE_EMPHASIS_3_5;
2013
		default:
2014
			return DP_TRAIN_PRE_EMPHASIS_0;
2015
		}
2016
	} else {
2330 Serge 2017
	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2018
	case DP_TRAIN_VOLTAGE_SWING_400:
2019
		return DP_TRAIN_PRE_EMPHASIS_6;
2020
	case DP_TRAIN_VOLTAGE_SWING_600:
2021
		return DP_TRAIN_PRE_EMPHASIS_6;
2022
	case DP_TRAIN_VOLTAGE_SWING_800:
2023
		return DP_TRAIN_PRE_EMPHASIS_3_5;
2024
	case DP_TRAIN_VOLTAGE_SWING_1200:
2025
	default:
2026
		return DP_TRAIN_PRE_EMPHASIS_0;
2027
	}
2342 Serge 2028
	}
2330 Serge 2029
}
2030
 
4104 Serge 2031
/*
 * Program VLV swing/pre-emphasis through the DPIO PHY registers.  Unlike
 * the other platforms, nothing goes into the DP port register itself, so
 * this always returns 0 (and the caller uses mask = 0).  A return of 0
 * before the DPIO writes means the train_set combination is invalid.
 */
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* Pick the PHY register values for the requested pre-emphasis and
	 * voltage swing combination; only the combinations listed here are
	 * reachable given the VLV clamp in intel_dp_pre_emphasis_max(). */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_1200:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_800:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_600:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	/* Write the values out: Tx disabled, values programmed, Tx re-enabled
	 * (last VLV_TX_DW5 write), all under the DPIO lock. */
	mutex_lock(&dev_priv->dpio_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}
2130
 
2330 Serge 2131
static void
4560 Serge 2132
intel_get_adjust_train(struct intel_dp *intel_dp,
2133
		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
2330 Serge 2134
{
2135
	uint8_t v = 0;
2136
	uint8_t p = 0;
2137
	int lane;
2342 Serge 2138
	uint8_t voltage_max;
2139
	uint8_t preemph_max;
2330 Serge 2140
 
2141
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
3243 Serge 2142
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
2143
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
2330 Serge 2144
 
2145
		if (this_v > v)
2146
			v = this_v;
2147
		if (this_p > p)
2148
			p = this_p;
2149
	}
2150
 
2342 Serge 2151
	voltage_max = intel_dp_voltage_max(intel_dp);
2152
	if (v >= voltage_max)
2153
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2330 Serge 2154
 
2342 Serge 2155
	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
2156
	if (p >= preemph_max)
2157
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2330 Serge 2158
 
2159
	for (lane = 0; lane < 4; lane++)
2160
		intel_dp->train_set[lane] = v | p;
2161
}
2162
 
2163
static uint32_t
3480 Serge 2164
intel_gen4_signal_levels(uint8_t train_set)
2330 Serge 2165
{
2166
	uint32_t	signal_levels = 0;
2167
 
2168
	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2169
	case DP_TRAIN_VOLTAGE_SWING_400:
2170
	default:
2171
		signal_levels |= DP_VOLTAGE_0_4;
2172
		break;
2173
	case DP_TRAIN_VOLTAGE_SWING_600:
2174
		signal_levels |= DP_VOLTAGE_0_6;
2175
		break;
2176
	case DP_TRAIN_VOLTAGE_SWING_800:
2177
		signal_levels |= DP_VOLTAGE_0_8;
2178
		break;
2179
	case DP_TRAIN_VOLTAGE_SWING_1200:
2180
		signal_levels |= DP_VOLTAGE_1_2;
2181
		break;
2182
	}
2183
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2184
	case DP_TRAIN_PRE_EMPHASIS_0:
2185
	default:
2186
		signal_levels |= DP_PRE_EMPHASIS_0;
2187
		break;
2188
	case DP_TRAIN_PRE_EMPHASIS_3_5:
2189
		signal_levels |= DP_PRE_EMPHASIS_3_5;
2190
		break;
2191
	case DP_TRAIN_PRE_EMPHASIS_6:
2192
		signal_levels |= DP_PRE_EMPHASIS_6;
2193
		break;
2194
	case DP_TRAIN_PRE_EMPHASIS_9_5:
2195
		signal_levels |= DP_PRE_EMPHASIS_9_5;
2196
		break;
2197
	}
2198
	return signal_levels;
2199
}
2200
 
2201
/* Gen6's DP voltage swing and pre-emphasis control.
 * Several swing/pre-emphasis combinations share a single hardware encoding
 * (e.g. 400mV and 600mV at 0dB), hence the grouped case labels; unsupported
 * combinations fall back to the 400/600mV 0dB setting. */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
2228
 
2342 Serge 2229
/* Gen7's DP voltage swing and pre-emphasis control (IVB CPU eDP).
 * Unsupported combinations fall back to the 500mV 0dB setting. */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
2259
 
3243 Serge 2260
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control.
 * Maps the DPCD train_set to the DDI buffer translation select values;
 * unsupported combinations fall back to 400mV 0dB. */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}
2293
 
4560 Serge 2294
/* Broadwell's DP voltage swing and pre-emphasis control.
 * Maps the DPCD train_set to the DDI buffer translation select values
 * (Sel0..Sel8); unsupported combinations fall back to Sel0 (400mV 0dB). */
static uint32_t
intel_bdw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_BDW;	/* Sel1 */
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_BDW;	/* Sel2 */

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_BDW;	/* Sel3 */
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_BDW;	/* Sel4 */
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_BDW;	/* Sel5 */

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_BDW;	/* Sel6 */
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_BDW;	/* Sel7 */

	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_1200MV_0DB_BDW;	/* Sel8 */

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_BDW;	/* Sel0 */
	}
}
2328
 
3480 Serge 2329
/* Properly updates "DP" with the correct signal levels.
 * Dispatches to the per-platform translation of train_set[0] and merges the
 * result into *DP under the platform's field mask.  On VLV the levels are
 * written straight to the DPIO PHY (intel_vlv_signal_levels returns 0), so
 * the mask is 0 and *DP is left untouched. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	uint8_t train_set = intel_dp->train_set[0];

	if (IS_BROADWELL(dev)) {
		signal_levels = intel_bdw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_HASWELL(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = intel_vlv_signal_levels(intel_dp);
		mask = 0;
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}
2363
 
2330 Serge 2364
/*
 * Program the requested training pattern on both the source and the sink.
 * The source side uses DP_TP_CTL on DDI platforms, the CPT variant of the
 * port-register train field on CPT PCH ports, and the legacy field
 * otherwise; *DP is updated in place and written out.  The sink side gets
 * DP_TRAINING_PATTERN_SET plus, except on disable, the per-lane
 * DP_TRAINING_LANEx_SET values in a single AUX write.  Returns true if the
 * AUX write transferred the full buffer.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Pattern 3 has no CPT encoding; fall back to 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Pattern 3 has no legacy encoding; fall back to 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
					buf, len);

	return ret == len;
}
2460
 
2461
/*
 * (Re)start training from scratch: zero the train_set, program the matching
 * signal levels into *DP, and write the requested training pattern to both
 * the hardware and the sink.  Returns the result of the pattern write.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
2469
 
2470
/*
 * Apply the sink's requested voltage-swing/pre-emphasis adjustments: derive
 * the new train_set from link_status, program the signal levels into *DP,
 * write the port register, and push the per-lane values to the sink via
 * DP_TRAINING_LANE0_SET.  Returns true if all lane bytes were written.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
					intel_dp->train_set,
					intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
2491
 
3746 Serge 2492
/*
 * Switch a DDI port to idle-pattern transmission and wait for the idle
 * patterns to be sent (except on PORT_A, see comment below).  No-op on
 * non-DDI hardware.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
2522
 
2330 Serge 2523
/* Enable corresponding port and start training pattern 1.
 * Writes the link configuration (bandwidth, lane count, 8b/10b) to the
 * sink, then runs the clock-recovery phase: loop reading link status and
 * applying the sink's swing/pre-emphasis requests, restarting from zero
 * levels up to 5 times once every lane hits max swing, and giving up after
 * 5 attempts at the same voltage.  The resulting port-register value is
 * saved back into intel_dp->DP for intel_dp_complete_link_train(). */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: restart from zero levels. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
			break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}
2613
 
3243 Serge 2614
/*
 * Channel-equalization phase of link training (pattern 2).  Loops on the
 * sink's link status: if clock recovery was lost, or EQ fails 5 times in a
 * row, the clock-recovery phase is redone (up to 5 clock-recovery retries
 * total).  Finishes by putting DDI ports into idle transmission and saving
 * the final port-register value into intel_dp->DP.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     DP_TRAINING_PATTERN_2 |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						DP_TRAINING_PATTERN_2 |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						DP_TRAINING_PATTERN_2 |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");

}
2689
 
3746 Serge 2690
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
2691
{
4560 Serge 2692
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3746 Serge 2693
				DP_TRAINING_PATTERN_DISABLE);
2694
}
2695
 
2330 Serge 2696
/*
 * Tear the DP link down: put the port into the idle training pattern,
 * apply the IBX transcoder-select workaround, then disable the port.
 * Must not be used on DDI platforms, which have their own strict
 * modeset sequence.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	/* Port already down: nothing to do (and the sequence below assumes
	 * an enabled port). */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Step 1: switch to the idle training pattern. CPT PCH (and gen7
	 * port A) uses a different mask/pattern encoding. */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Step 2: finally turn audio and the port itself off, and honour the
	 * panel power-down delay before returning. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
2775
 
2776
/*
 * Read and cache the sink's DPCD receiver-capability block over AUX.
 *
 * Side effects: fills intel_dp->dpcd, intel_dp->psr_dpcd and (for branch
 * devices with per-port info) intel_dp->downstream_ports; may set
 * dev_priv->psr.sink_support for PSR-capable eDP panels.
 *
 * Returns false if the AUX transfer fails or no DPCD is present
 * (DP_DPCD_REV == 0), true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* 3 chars per byte: two hex digits plus separator/NUL. */
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		/* Read failure is tolerated: psr_dpcd stays zeroed. */
		intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
					       intel_dp->psr_dpcd,
					       sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}
	}

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}
2330 Serge 2822
 
3031 serge 2823
/*
 * Read and log the sink and branch IEEE OUIs (vendor identifiers) over
 * AUX, purely for debugging. The AUX reads are bracketed by panel VDD
 * on/off so the AUX channel is powered on eDP panels.
 */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	/* Sink advertises OUI support via DOWN_STREAM_PORT_COUNT bit. */
	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}
2843
 
2342 Serge 2844
static bool
2845
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2846
{
2847
	int ret;
2848
 
2849
	ret = intel_dp_aux_native_read_retry(intel_dp,
2850
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
2851
					     sink_irq_vector, 1);
2852
	if (!ret)
2853
		return false;
2854
 
2855
	return true;
2856
}
2857
 
2858
/*
 * Respond to a sink DP_AUTOMATED_TEST_REQUEST. Automated compliance
 * testing is not implemented, so every request is refused.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
2864
 
2330 Serge 2865
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */

/*
 * Hot-plug / IRQ handler: verify the link is still good and retrain it
 * if channel equalization has been lost. Also services and clears any
 * pending sink IRQ vector. Bails out silently if the encoder is
 * inactive or either the link status or DPCD cannot be read.
 */
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Full retrain sequence: clock recovery, channel EQ, then idle. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
2919
 
3031 serge 2920
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide connector status from the DPCD alone: handles native sinks,
 * HPD-aware branch devices (via SINK_COUNT), and falls back to a DDC
 * probe for the rest. Returns connected/disconnected/unknown.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->adapter))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		/* DPCD >= 1.1: per-port downstream type info is available. */
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* Legacy DPCD 1.0: only the coarse downstream type field. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
2967
 
2968
static enum drm_connector_status
2969
ironlake_dp_detect(struct intel_dp *intel_dp)
2970
{
3243 Serge 2971
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3480 Serge 2972
	struct drm_i915_private *dev_priv = dev->dev_private;
2973
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2330 Serge 2974
	enum drm_connector_status status;
2975
 
2976
	/* Can't disconnect eDP, but you can close the lid... */
2977
	if (is_edp(intel_dp)) {
3243 Serge 2978
		status = intel_panel_detect(dev);
2330 Serge 2979
		if (status == connector_status_unknown)
2980
			status = connector_status_connected;
2981
		return status;
2982
	}
2983
 
3480 Serge 2984
	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
2985
		return connector_status_disconnected;
2986
 
2330 Serge 2987
	return intel_dp_detect_dpcd(intel_dp);
2988
}
2989
 
2990
/*
 * Detect routine for G4x-class and Valleyview hardware: check the
 * per-port hot-plug live-status bit in PORT_HOTPLUG_STAT before probing
 * the DPCD. eDP status again follows the lid.
 */
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	uint32_t bit;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		enum drm_connector_status status;

		status = intel_panel_detect(dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	/* VLV and G4x use different live-status bit layouts. */
	if (IS_VALLEYVIEW(dev)) {
		switch (intel_dig_port->port) {
		case PORT_B:
			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
			break;
		case PORT_C:
			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
			break;
		case PORT_D:
			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
			break;
		default:
			return connector_status_unknown;
		}
	} else {
		switch (intel_dig_port->port) {
		case PORT_B:
			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
			break;
		case PORT_C:
			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
			break;
		case PORT_D:
			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
			break;
		default:
			return connector_status_unknown;
		}
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}
3043
 
2342 Serge 3044
static struct edid *
3045
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
3046
{
3243 Serge 3047
	struct intel_connector *intel_connector = to_intel_connector(connector);
3048
 
3049
	/* use cached edid if we have one */
3050
	if (intel_connector->edid) {
3051
		/* invalid edid */
3052
		if (IS_ERR(intel_connector->edid))
3031 serge 3053
			return NULL;
3054
 
4560 Serge 3055
		return drm_edid_duplicate(intel_connector->edid);
3031 serge 3056
	}
3057
 
3243 Serge 3058
	return drm_get_edid(connector, adapter);
2342 Serge 3059
}
3060
 
3061
static int
3062
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
3063
{
3243 Serge 3064
	struct intel_connector *intel_connector = to_intel_connector(connector);
2342 Serge 3065
 
3243 Serge 3066
	/* use cached edid if we have one */
3067
	if (intel_connector->edid) {
3068
		/* invalid edid */
3069
		if (IS_ERR(intel_connector->edid))
3070
			return 0;
3071
 
3072
		return intel_connector_update_modes(connector,
3073
						    intel_connector->edid);
3031 serge 3074
	}
3075
 
3243 Serge 3076
	return intel_ddc_get_modes(connector, adapter);
2342 Serge 3077
}
3078
 
2330 Serge 3079
/*
 * drm_connector_funcs.detect callback. Dispatches to the platform
 * detect routine, and for a connected sink probes the OUI and audio
 * capability (from EDID unless the force_audio property overrides it).
 * Runtime PM is held across the whole probe; all exits go through the
 * out label so the get/put stay paired.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_runtime_pm_get(dev_priv);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector));

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		/* User property overrides EDID-based audio detection. */
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	/* An eDP encoder stays eDP; anything else is plain DisplayPort. */
	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	intel_runtime_pm_put(dev_priv);
	return status;
}
3125
 
3126
static int intel_dp_get_modes(struct drm_connector *connector)
3127
{
3128
	struct intel_dp *intel_dp = intel_attached_dp(connector);
3243 Serge 3129
	struct intel_connector *intel_connector = to_intel_connector(connector);
3130
	struct drm_device *dev = connector->dev;
2330 Serge 3131
	int ret;
3132
 
3133
	/* We should parse the EDID data and find out if it has an audio sink
3134
	 */
3135
 
2342 Serge 3136
	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
3243 Serge 3137
	if (ret)
2330 Serge 3138
		return ret;
3139
 
3243 Serge 3140
	/* if eDP has no EDID, fall back to fixed mode */
3141
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2330 Serge 3142
			struct drm_display_mode *mode;
3243 Serge 3143
		mode = drm_mode_duplicate(dev,
3144
					  intel_connector->panel.fixed_mode);
3145
		if (mode) {
2330 Serge 3146
			drm_mode_probed_add(connector, mode);
3147
			return 1;
3148
		}
3149
	}
3150
	return 0;
3151
}
3152
 
3243 Serge 3153
static bool
3154
intel_dp_detect_audio(struct drm_connector *connector)
3155
{
3156
	struct intel_dp *intel_dp = intel_attached_dp(connector);
3157
	struct edid *edid;
3158
	bool has_audio = false;
2330 Serge 3159
 
3243 Serge 3160
	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
3161
	if (edid) {
3162
		has_audio = drm_detect_monitor_audio(edid);
3163
		kfree(edid);
3164
	}
2330 Serge 3165
 
3243 Serge 3166
	return has_audio;
3167
}
2330 Serge 3168
 
3169
/*
 * drm_connector_funcs.set_property callback. Handles three properties:
 * force_audio, broadcast RGB range, and (eDP only) the panel scaling
 * mode. A property change that affects output triggers a modeset via
 * the done label; no-op changes return 0 early without one.
 * Returns 0 on success or -EINVAL for unknown properties/values.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the value in the drm core first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* No effective change: skip the modeset. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: skip the modeset. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-run the modeset so the new property value takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
3256
 
3257
static void
4104 Serge 3258
intel_dp_connector_destroy(struct drm_connector *connector)
2330 Serge 3259
{
3243 Serge 3260
	struct intel_connector *intel_connector = to_intel_connector(connector);
2330 Serge 3261
 
3243 Serge 3262
	if (!IS_ERR_OR_NULL(intel_connector->edid))
3263
		kfree(intel_connector->edid);
3264
 
4104 Serge 3265
	/* Can't call is_edp() since the encoder may have been destroyed
3266
	 * already. */
3267
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3243 Serge 3268
		intel_panel_fini(&intel_connector->panel);
2330 Serge 3269
 
3270
	drm_connector_cleanup(connector);
3271
	kfree(connector);
3272
}
3273
 
3243 Serge 3274
/*
 * Encoder destroy callback: remove the AUX i2c adapter, clean up the
 * drm encoder, and for eDP make sure any pending deferred VDD-off work
 * is cancelled and VDD is synchronously turned off (under the modeset
 * lock, as ironlake_panel_vdd_off_sync expects) before freeing.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		/* Cancel first so the work can't race with the sync-off. */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		mutex_lock(&dev->mode_config.mutex);
		ironlake_panel_vdd_off_sync(intel_dp);
		mutex_unlock(&dev->mode_config.mutex);
	}
	kfree(intel_dig_port);
}
3290
 
3291
/* DRM connector operations for DP/eDP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_connector_destroy,
};

/* Mode enumeration/validation helpers for DP/eDP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

/* DRM encoder operations: only teardown is needed here. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
3308
 
3309
/*
 * Encoder hot-plug callback: re-validate (and if needed retrain) the
 * link whenever a hot-plug event arrives for this encoder.
 */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	intel_dp_check_link_status(intel_dp);
}
3316
 
2327 Serge 3317
/* Return which DP Port should be selected for Transcoder DP control */
/*
 * Walk the encoders attached to @crtc and return the output register of
 * the first DP or eDP encoder found, or -1 if the crtc drives no DP
 * output.
 */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
			return intel_dp->output_reg;
	}

	return -1;
}
2330 Serge 3335
 
3336
/* check the VBT to see whether the eDP is on DP-D port */
4560 Serge 3337
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
2330 Serge 3338
{
3339
	struct drm_i915_private *dev_priv = dev->dev_private;
4560 Serge 3340
	union child_device_config *p_child;
2330 Serge 3341
	int i;
4560 Serge 3342
	static const short port_mapping[] = {
3343
		[PORT_B] = PORT_IDPB,
3344
		[PORT_C] = PORT_IDPC,
3345
		[PORT_D] = PORT_IDPD,
3346
	};
2330 Serge 3347
 
4560 Serge 3348
	if (port == PORT_A)
3349
		return true;
3350
 
4104 Serge 3351
	if (!dev_priv->vbt.child_dev_num)
2330 Serge 3352
		return false;
3353
 
4104 Serge 3354
	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3355
		p_child = dev_priv->vbt.child_dev + i;
2330 Serge 3356
 
4560 Serge 3357
		if (p_child->common.dvo_port == port_mapping[port] &&
3358
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
3359
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
2330 Serge 3360
			return true;
3361
	}
3362
	return false;
3363
}
3364
 
3365
/*
 * Attach the user-visible connector properties: force-audio and
 * broadcast-RGB for all DP connectors, plus the panel scaling-mode
 * property (defaulting to aspect-preserving scaling) for eDP.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	/* Start out with automatic full/limited range selection. */
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}
3383
 
3243 Serge 3384
/*
 * Compute the eDP panel power-sequencing delays: read the current
 * hardware values, take the max of those and the VBT values per field,
 * and fall back to the eDP-spec upper limits where both are unset.
 * Stores per-delay results in intel_dp and, if @out is non-NULL, the
 * raw final sequence for later programming into the registers.
 *
 * All intermediate values are in the hardware's 100us units; the
 * intel_dp->*_delay fields are converted to ms via DIV_ROUND_UP.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	/* Register layout differs: PCH-split vs per-pipe VLV sequencer. */
	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert 100us hardware units to ms, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}
3483
 
3484
/*
 * Program the computed power-sequence delays (@seq, in the hardware's
 * 100us units) into the panel power sequencer registers, including the
 * reference-clock divisor and — where the hardware still has one — the
 * port-select field.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	/* Sequencer reference clock: PCH raw clock on PCH-split, hraw otherwise. */
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;

	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* And finally store the new values in the power sequencer. */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_B)
			port_sel = PANEL_PORT_SELECT_DPB_VLV;
		else
			port_sel = PANEL_PORT_SELECT_DPC_VLV;
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}
3542
 
4104 Serge 3543
/*
 * Initialize the eDP-specific parts of a DP connector: parse the panel
 * power sequencer delays, probe the sink's DPCD, cache the EDID and pick
 * a fixed panel mode.
 *
 * Returns true on success (also for non-eDP ports, which need none of
 * this), false if the eDP sink looks like a "ghost" (no DPCD), in which
 * case the caller must tear the connector down again.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct edp_power_seq power_seq = { 0 };
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;

	/* Nothing to do for external DP ports. */
	if (!is_edp(intel_dp))
		return true;

	/*
	 * Work out the power-up/down delays first: the VDD force below
	 * relies on them being known.
	 */
	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);

	/* Cache DPCD and EDID for edp. */
	ironlake_edp_panel_vdd_on(intel_dp);
	has_dpcd = intel_dp_get_dpcd(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, false);

	if (has_dpcd) {
		/* DPCD 1.1+ can report that AUX handshaking is unneeded. */
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
						      &power_seq);

	/*
	 * Cache the EDID once; on failure store an ERR_PTR so later users
	 * can distinguish "bad EDID" (-EINVAL) from "no EDID" (-ENOENT).
	 */
	edid = drm_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}

	/* fixed_mode may still be NULL here; intel_panel_init accepts that. */
	intel_panel_init(&intel_connector->panel, fixed_mode);
	intel_panel_setup_backlight(connector);

	return true;
}
3617
 
3618
/*
 * Create and register the drm_connector for a DP/eDP digital port:
 * classify the port (DP vs eDP), set up the connector, hotplug pin,
 * AUX channel register and DDC bus, then run the eDP-specific init.
 *
 * Returns true on success; false if the eDP panel probe failed, in
 * which case everything registered here is torn down again before
 * returning.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int type, error;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
	type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Deferred VDD-off work; must exist before any vdd_on/off calls. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	/* DDI platforms read connector state through the DDI helpers. */
	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
	intel_connector->get_hw_state = intel_connector_get_hw_state;

	/*
	 * AUX control register normally sits at a fixed offset from the
	 * port's output register; DDI hardware uses dedicated registers.
	 */
	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
	if (HAS_DDI(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
			name = "DPDDC-A";
			break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
			name = "DPDDC-B";
			break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
			name = "DPDDC-C";
			break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
			name = "DPDDC-D";
			break;
	default:
		BUG();
	}

	/* i2c init failure is logged but deliberately non-fatal here. */
	error = intel_dp_i2c_init(intel_dp, intel_connector, name);
	WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
	     error, port_name(port));

	intel_dp->psr_setup_done = false;

	/*
	 * eDP probe failed ("ghost" panel): unwind everything set up above.
	 * For eDP we must also cancel the VDD work and force VDD off under
	 * the mode_config mutex before freeing the connector.
	 */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		i2c_del_adapter(&intel_dp->adapter);
	if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			mutex_lock(&dev->mode_config.mutex);
			ironlake_panel_vdd_off_sync(intel_dp);
			mutex_unlock(&dev->mode_config.mutex);
		}
		drm_sysfs_connector_remove(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}
3243 Serge 3743
 
3744
void
3745
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3746
{
3747
	struct intel_digital_port *intel_dig_port;
3748
	struct intel_encoder *intel_encoder;
3749
	struct drm_encoder *encoder;
3750
	struct intel_connector *intel_connector;
3751
 
4560 Serge 3752
	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
3243 Serge 3753
	if (!intel_dig_port)
3754
		return;
3755
 
4560 Serge 3756
	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
3243 Serge 3757
	if (!intel_connector) {
3758
		kfree(intel_dig_port);
3759
		return;
3760
	}
3761
 
3762
	intel_encoder = &intel_dig_port->base;
3763
	encoder = &intel_encoder->base;
3764
 
3765
	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3766
			 DRM_MODE_ENCODER_TMDS);
3767
 
3746 Serge 3768
	intel_encoder->compute_config = intel_dp_compute_config;
4104 Serge 3769
	intel_encoder->mode_set = intel_dp_mode_set;
3243 Serge 3770
	intel_encoder->disable = intel_disable_dp;
3771
	intel_encoder->post_disable = intel_post_disable_dp;
3772
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
4104 Serge 3773
	intel_encoder->get_config = intel_dp_get_config;
3774
	if (IS_VALLEYVIEW(dev)) {
4560 Serge 3775
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
4104 Serge 3776
		intel_encoder->pre_enable = vlv_pre_enable_dp;
3777
		intel_encoder->enable = vlv_enable_dp;
3778
	} else {
4560 Serge 3779
		intel_encoder->pre_enable = g4x_pre_enable_dp;
3780
		intel_encoder->enable = g4x_enable_dp;
4104 Serge 3781
	}
3243 Serge 3782
 
3783
	intel_dig_port->port = port;
3784
	intel_dig_port->dp.output_reg = output_reg;
3785
 
3786
	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3787
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3788
	intel_encoder->cloneable = false;
3789
	intel_encoder->hot_plug = intel_dp_hot_plug;
3790
 
4104 Serge 3791
	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
3792
		drm_encoder_cleanup(encoder);
3793
		kfree(intel_dig_port);
3794
		kfree(intel_connector);
3795
	}
3243 Serge 3796
}