Subversion Repositories Kolibri OS


Rev Author Line No. Line
2327 Serge 1
/*
2
 * Copyright © 2008 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *    Keith Packard <keithp@keithp.com>
25
 *
26
 */
27
 
28
#include <linux/i2c.h>
2330 Serge 29
#include <linux/slab.h>
3031 serge 30
#include <linux/export.h>
31
#include <drm/drmP.h>
32
#include <drm/drm_crtc.h>
33
#include <drm/drm_crtc_helper.h>
34
#include <drm/drm_edid.h>
2327 Serge 35
#include "intel_drv.h"
3031 serge 36
#include <drm/i915_drm.h>
2327 Serge 37
#include "i915_drv.h"
38
 
2342 Serge 39
#define DP_RECEIVER_CAP_SIZE	0xf
2327 Serge 40
#define DP_LINK_STATUS_SIZE 6
41
#define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
42
 
43
/**
44
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
45
 * @intel_dp: DP struct
46
 *
47
 * If a CPU or PCH DP output is attached to an eDP panel, this function
48
 * will return true, and false otherwise.
49
 */
50
static bool is_edp(struct intel_dp *intel_dp)
51
{
52
	return intel_dp->base.type == INTEL_OUTPUT_EDP;
53
}
54
 
55
/**
56
 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
57
 * @intel_dp: DP struct
58
 *
59
 * Returns true if the given DP struct corresponds to a PCH DP port attached
60
 * to an eDP panel, false otherwise.  Helpful for determining whether we
61
 * may need FDI resources for a given DP output or not.
62
 */
63
static bool is_pch_edp(struct intel_dp *intel_dp)
64
{
65
	return intel_dp->is_pch_edp;
66
}
67
 
2342 Serge 68
/**
69
 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
70
 * @intel_dp: DP struct
71
 *
72
 * Returns true if the given DP struct corresponds to a CPU eDP port.
73
 */
74
static bool is_cpu_edp(struct intel_dp *intel_dp)
75
{
76
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
77
}
78
 
2327 Serge 79
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
80
{
81
	return container_of(encoder, struct intel_dp, base.base);
82
}
83
 
2330 Serge 84
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
85
{
86
	return container_of(intel_attached_encoder(connector),
87
			    struct intel_dp, base);
88
}
2327 Serge 89
 
90
/**
91
 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
92
 * @encoder: DRM encoder
93
 *
94
 * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
95
 * by intel_display.c.
96
 */
97
bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
98
{
99
    struct intel_dp *intel_dp;
100
 
101
    if (!encoder)
102
        return false;
103
 
104
    intel_dp = enc_to_intel_dp(encoder);
105
 
106
    return is_pch_edp(intel_dp);
107
}
108
 
2330 Serge 109
static void intel_dp_start_link_train(struct intel_dp *intel_dp);
110
static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
111
static void intel_dp_link_down(struct intel_dp *intel_dp);
112
 
2327 Serge 113
void
2342 Serge 114
intel_edp_link_config(struct intel_encoder *intel_encoder,
2327 Serge 115
		       int *lane_num, int *link_bw)
116
{
117
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
118
 
119
	*lane_num = intel_dp->lane_count;
120
	if (intel_dp->link_bw == DP_LINK_BW_1_62)
121
		*link_bw = 162000;
122
	else if (intel_dp->link_bw == DP_LINK_BW_2_7)
123
		*link_bw = 270000;
124
}
125
 
3031 serge 126
int
127
intel_edp_target_clock(struct intel_encoder *intel_encoder,
128
		       struct drm_display_mode *mode)
129
{
130
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
131
 
132
	if (intel_dp->panel_fixed_mode)
133
		return intel_dp->panel_fixed_mode->clock;
134
	else
135
		return mode->clock;
136
}
137
 
2330 Serge 138
static int
139
intel_dp_max_lane_count(struct intel_dp *intel_dp)
140
{
2342 Serge 141
	int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
2330 Serge 142
		switch (max_lane_count) {
143
		case 1: case 2: case 4:
144
			break;
145
		default:
146
			max_lane_count = 4;
147
		}
148
	return max_lane_count;
149
}
2327 Serge 150
 
2330 Serge 151
static int
152
intel_dp_max_link_bw(struct intel_dp *intel_dp)
153
{
154
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
2327 Serge 155
 
2330 Serge 156
	switch (max_link_bw) {
157
	case DP_LINK_BW_1_62:
158
	case DP_LINK_BW_2_7:
159
		break;
160
	default:
161
		max_link_bw = DP_LINK_BW_1_62;
162
		break;
163
	}
164
	return max_link_bw;
165
}
2327 Serge 166
 
2330 Serge 167
static int
168
intel_dp_link_clock(uint8_t link_bw)
169
{
170
	if (link_bw == DP_LINK_BW_2_7)
171
		return 270000;
172
	else
173
		return 162000;
174
}
2327 Serge 175
 
2342 Serge 176
/*
177
 * The units on the numbers in the next two are... bizarre.  Examples will
178
 * make it clearer; this one parallels an example in the eDP spec.
179
 *
180
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
181
 *
182
 *     270000 * 1 * 8 / 10 == 216000
183
 *
184
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
185
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
186
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
187
 * 119000.  At 18bpp that's 2142000 kilobits per second.
188
 *
189
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
190
 * get the result in decakilobits instead of kilobits.
191
 */
192
 
2330 Serge 193
static int
2351 Serge 194
intel_dp_link_required(int pixel_clock, int bpp)
2330 Serge 195
{
2342 Serge 196
	return (pixel_clock * bpp + 9) / 10;
2330 Serge 197
}
2327 Serge 198
 
2330 Serge 199
static int
200
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
201
{
202
	return (max_link_clock * max_lanes * 8) / 10;
203
}
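
/*
 * Worked example with illustrative numbers: a 148500 kHz (1080p-class)
 * pixel clock at 24 bpp needs intel_dp_link_required(148500, 24) ==
 * 356400, while two 2.7 GHz lanes give intel_dp_max_data_rate(270000, 2)
 * == 432000, so such a mode fits with headroom.  Both figures are in the
 * "decakilobit" units described above.
 */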
2327 Serge 204
 
3031 serge 205
static bool
206
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
207
			  struct drm_display_mode *mode,
208
			  bool adjust_mode)
209
{
210
	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
211
	int max_lanes = intel_dp_max_lane_count(intel_dp);
212
	int max_rate, mode_rate;
213
 
214
	mode_rate = intel_dp_link_required(mode->clock, 24);
215
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216
 
217
	if (mode_rate > max_rate) {
218
		mode_rate = intel_dp_link_required(mode->clock, 18);
219
		if (mode_rate > max_rate)
220
			return false;
221
 
222
		if (adjust_mode)
223
			mode->private_flags
224
				|= INTEL_MODE_DP_FORCE_6BPC;
225
 
226
		return true;
227
	}
228
 
229
	return true;
230
}
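
/*
 * Example of the 6bpc fallback (hypothetical panel): two 1.62 GHz lanes
 * top out at 259200, so a 135000 kHz mode is rejected at 24 bpp
 * (link_required == 324000) but accepted at 18 bpp (243000), and
 * INTEL_MODE_DP_FORCE_6BPC is set when adjust_mode is true.
 */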
231
 
2330 Serge 232
static int
233
intel_dp_mode_valid(struct drm_connector *connector,
234
		    struct drm_display_mode *mode)
235
{
236
	struct intel_dp *intel_dp = intel_attached_dp(connector);
2327 Serge 237
 
2342 Serge 238
	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
239
		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
2330 Serge 240
			return MODE_PANEL;
2327 Serge 241
 
2342 Serge 242
		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
2330 Serge 243
			return MODE_PANEL;
244
	}
2327 Serge 245
 
3031 serge 246
	if (!intel_dp_adjust_dithering(intel_dp, mode, false))
2330 Serge 247
		return MODE_CLOCK_HIGH;
2327 Serge 248
 
2330 Serge 249
	if (mode->clock < 10000)
250
		return MODE_CLOCK_LOW;
251
 
3031 serge 252
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
253
		return MODE_H_ILLEGAL;
254
 
2330 Serge 255
	return MODE_OK;
256
}
257
 
258
static uint32_t
259
pack_aux(uint8_t *src, int src_bytes)
260
{
261
	int	i;
262
	uint32_t v = 0;
263
 
264
	if (src_bytes > 4)
265
		src_bytes = 4;
266
	for (i = 0; i < src_bytes; i++)
267
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
268
	return v;
269
}
270
 
271
static void
272
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
273
{
274
	int i;
275
	if (dst_bytes > 4)
276
		dst_bytes = 4;
277
	for (i = 0; i < dst_bytes; i++)
278
		dst[i] = src >> ((3-i) * 8);
279
}
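
/*
 * Example: pack_aux((uint8_t []){ 0x12, 0x34, 0x56 }, 3) returns
 * 0x12345600 -- the first byte lands in bits 31:24, matching the
 * big-endian layout of the AUX data registers; unpack_aux() is the
 * exact inverse.
 */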
280
 
281
/* hrawclock is 1/4 the FSB frequency */
282
static int
283
intel_hrawclk(struct drm_device *dev)
284
{
285
	struct drm_i915_private *dev_priv = dev->dev_private;
286
	uint32_t clkcfg;
287
 
288
	clkcfg = I915_READ(CLKCFG);
289
	switch (clkcfg & CLKCFG_FSB_MASK) {
290
	case CLKCFG_FSB_400:
291
		return 100;
292
	case CLKCFG_FSB_533:
293
		return 133;
294
	case CLKCFG_FSB_667:
295
		return 166;
296
	case CLKCFG_FSB_800:
297
		return 200;
298
	case CLKCFG_FSB_1067:
299
		return 266;
300
	case CLKCFG_FSB_1333:
301
		return 333;
302
	/* these two are just a guess; one of them might be right */
303
	case CLKCFG_FSB_1600:
304
	case CLKCFG_FSB_1600_ALT:
305
		return 400;
306
	default:
307
		return 133;
308
	}
309
}
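
/*
 * For example, an 800 MHz FSB reports CLKCFG_FSB_800 and yields a
 * 200 MHz hrawclk; intel_dp_aux_ch() below divides that value by two to
 * get a divider that yields roughly a 2 MHz AUX clock on hardware
 * without a PCH split.
 */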
310
 
2342 Serge 311
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
312
{
313
	struct drm_device *dev = intel_dp->base.base.dev;
314
	struct drm_i915_private *dev_priv = dev->dev_private;
315
 
316
	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
317
}
318
 
319
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
320
{
321
	struct drm_device *dev = intel_dp->base.base.dev;
322
	struct drm_i915_private *dev_priv = dev->dev_private;
323
 
324
	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
325
}
326
 
327
static void
328
intel_dp_check_edp(struct intel_dp *intel_dp)
329
{
330
	struct drm_device *dev = intel_dp->base.base.dev;
331
	struct drm_i915_private *dev_priv = dev->dev_private;
332
 
333
	if (!is_edp(intel_dp))
334
		return;
335
	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
336
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
337
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
338
			      I915_READ(PCH_PP_STATUS),
339
			      I915_READ(PCH_PP_CONTROL));
340
	}
341
}
342
 
2330 Serge 343
static int
344
intel_dp_aux_ch(struct intel_dp *intel_dp,
345
		uint8_t *send, int send_bytes,
346
		uint8_t *recv, int recv_size)
347
{
348
	uint32_t output_reg = intel_dp->output_reg;
349
	struct drm_device *dev = intel_dp->base.base.dev;
350
	struct drm_i915_private *dev_priv = dev->dev_private;
351
	uint32_t ch_ctl = output_reg + 0x10;
352
	uint32_t ch_data = ch_ctl + 4;
353
	int i;
354
	int recv_bytes;
355
	uint32_t status;
356
	uint32_t aux_clock_divider;
357
	int try, precharge;
358
 
2342 Serge 359
	intel_dp_check_edp(intel_dp);
2330 Serge 360
	/* The clock divider is based off the hrawclk,
361
	 * and would like to run at 2MHz. So, take the
362
	 * hrawclk value and divide by 2 and use that
363
	 *
364
	 * Note that PCH attached eDP panels should use a 125MHz input
365
	 * clock divider.
366
	 */
2342 Serge 367
	if (is_cpu_edp(intel_dp)) {
368
		if (IS_GEN6(dev) || IS_GEN7(dev))
369
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
2330 Serge 370
		else
371
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
372
	} else if (HAS_PCH_SPLIT(dev))
3031 serge 373
		aux_clock_divider = 63; /* ILK input clock fixed at 125MHz */
2330 Serge 374
	else
375
		aux_clock_divider = intel_hrawclk(dev) / 2;
376
 
377
	if (IS_GEN6(dev))
378
		precharge = 3;
379
	else
380
		precharge = 5;
381
 
382
	/* Try to wait for any previous AUX channel activity */
383
	for (try = 0; try < 3; try++) {
384
		status = I915_READ(ch_ctl);
385
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
386
			break;
387
		msleep(1);
388
	}
389
 
390
	if (try == 3) {
391
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
392
		     I915_READ(ch_ctl));
393
		return -EBUSY;
394
	}
395
 
396
	/* Must try at least 3 times according to DP spec */
397
	for (try = 0; try < 5; try++) {
398
		/* Load the send data into the aux channel data registers */
399
		for (i = 0; i < send_bytes; i += 4)
400
			I915_WRITE(ch_data + i,
401
				   pack_aux(send + i, send_bytes - i));
402
 
403
		/* Send the command and wait for it to complete */
404
		I915_WRITE(ch_ctl,
405
			   DP_AUX_CH_CTL_SEND_BUSY |
406
			   DP_AUX_CH_CTL_TIME_OUT_400us |
407
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
408
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
409
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
410
			   DP_AUX_CH_CTL_DONE |
411
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
412
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
413
		for (;;) {
414
			status = I915_READ(ch_ctl);
415
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
416
				break;
417
			udelay(100);
418
		}
419
 
420
		/* Clear done status and any errors */
421
		I915_WRITE(ch_ctl,
422
			   status |
423
			   DP_AUX_CH_CTL_DONE |
424
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
425
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
3031 serge 426
 
427
		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
428
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
429
			continue;
2330 Serge 430
		if (status & DP_AUX_CH_CTL_DONE)
431
			break;
432
	}
433
 
434
	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
435
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
436
		return -EBUSY;
437
	}
438
 
439
	/* Check for timeout or receive error.
440
	 * Timeouts occur when the sink is not connected
441
	 */
442
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
443
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
444
		return -EIO;
445
	}
446
 
447
	/* Timeouts occur when the device isn't connected, so they're
448
	 * "normal" -- don't fill the kernel log with these */
449
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
450
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
451
		return -ETIMEDOUT;
452
	}
453
 
454
	/* Unload any bytes sent back from the other side */
455
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
456
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
457
	if (recv_bytes > recv_size)
458
		recv_bytes = recv_size;
459
 
460
	for (i = 0; i < recv_bytes; i += 4)
461
		unpack_aux(I915_READ(ch_data + i),
462
			   recv + i, recv_bytes - i);
463
 
464
	return recv_bytes;
465
}
466
 
467
/* Write data to the aux channel in native mode */
468
static int
469
intel_dp_aux_native_write(struct intel_dp *intel_dp,
470
			  uint16_t address, uint8_t *send, int send_bytes)
471
{
472
	int ret;
473
	uint8_t	msg[20];
474
	int msg_bytes;
475
	uint8_t	ack;
476
 
2342 Serge 477
	intel_dp_check_edp(intel_dp);
2330 Serge 478
	if (send_bytes > 16)
479
		return -1;
480
	msg[0] = AUX_NATIVE_WRITE << 4;
481
	msg[1] = address >> 8;
482
	msg[2] = address & 0xff;
483
	msg[3] = send_bytes - 1;
484
	memcpy(&msg[4], send, send_bytes);
485
	msg_bytes = send_bytes + 4;
486
	for (;;) {
487
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
488
		if (ret < 0)
489
			return ret;
490
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
491
			break;
492
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
493
			udelay(100);
494
		else
495
			return -EIO;
496
	}
497
	return send_bytes;
498
}
499
 
500
/* Write a single byte to the aux channel in native mode */
501
static int
502
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
503
			    uint16_t address, uint8_t byte)
504
{
505
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
506
}
507
 
508
/* read bytes from a native aux channel */
509
static int
510
intel_dp_aux_native_read(struct intel_dp *intel_dp,
511
			 uint16_t address, uint8_t *recv, int recv_bytes)
512
{
513
	uint8_t msg[4];
514
	int msg_bytes;
515
	uint8_t reply[20];
516
	int reply_bytes;
517
	uint8_t ack;
518
	int ret;
519
 
2342 Serge 520
	intel_dp_check_edp(intel_dp);
2330 Serge 521
	msg[0] = AUX_NATIVE_READ << 4;
522
	msg[1] = address >> 8;
523
	msg[2] = address & 0xff;
524
	msg[3] = recv_bytes - 1;
525
 
526
	msg_bytes = 4;
527
	reply_bytes = recv_bytes + 1;
528
 
529
	for (;;) {
530
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
531
				      reply, reply_bytes);
532
		if (ret == 0)
533
			return -EPROTO;
534
		if (ret < 0)
535
			return ret;
536
		ack = reply[0];
537
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
538
			memcpy(recv, reply + 1, ret - 1);
539
			return ret - 1;
540
		}
541
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
542
			udelay(100);
543
		else
544
			return -EIO;
545
	}
546
}
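
/*
 * A native DPCD read is thus a four-byte request: the command nibble in
 * the high bits of msg[0], the address split across msg[1]/msg[2], and
 * "length - 1" in msg[3]; the reply carries a status byte followed by
 * the requested data, which is why reply_bytes is recv_bytes + 1.
 */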
547
 
548
static int
549
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
550
		    uint8_t write_byte, uint8_t *read_byte)
551
{
552
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
553
	struct intel_dp *intel_dp = container_of(adapter,
554
						struct intel_dp,
555
						adapter);
556
	uint16_t address = algo_data->address;
557
	uint8_t msg[5];
558
	uint8_t reply[2];
559
	unsigned retry;
560
	int msg_bytes;
561
	int reply_bytes;
562
	int ret;
563
 
2342 Serge 564
	intel_dp_check_edp(intel_dp);
2330 Serge 565
	/* Set up the command byte */
566
	if (mode & MODE_I2C_READ)
567
		msg[0] = AUX_I2C_READ << 4;
568
	else
569
		msg[0] = AUX_I2C_WRITE << 4;
570
 
571
	if (!(mode & MODE_I2C_STOP))
572
		msg[0] |= AUX_I2C_MOT << 4;
573
 
574
	msg[1] = address >> 8;
575
	msg[2] = address;
576
 
577
	switch (mode) {
578
	case MODE_I2C_WRITE:
579
		msg[3] = 0;
580
		msg[4] = write_byte;
581
		msg_bytes = 5;
582
		reply_bytes = 1;
583
		break;
584
	case MODE_I2C_READ:
585
		msg[3] = 0;
586
		msg_bytes = 4;
587
		reply_bytes = 2;
588
		break;
589
	default:
590
		msg_bytes = 3;
591
		reply_bytes = 1;
592
		break;
593
	}
594
 
595
	for (retry = 0; retry < 5; retry++) {
596
		ret = intel_dp_aux_ch(intel_dp,
597
				      msg, msg_bytes,
598
				      reply, reply_bytes);
599
		if (ret < 0) {
600
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
601
			return ret;
602
		}
603
 
604
		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
605
		case AUX_NATIVE_REPLY_ACK:
606
			/* I2C-over-AUX Reply field is only valid
607
			 * when paired with AUX ACK.
608
			 */
609
			break;
610
		case AUX_NATIVE_REPLY_NACK:
611
			DRM_DEBUG_KMS("aux_ch native nack\n");
612
			return -EREMOTEIO;
613
		case AUX_NATIVE_REPLY_DEFER:
614
			udelay(100);
615
			continue;
616
		default:
617
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
618
				  reply[0]);
619
			return -EREMOTEIO;
620
		}
621
 
622
		switch (reply[0] & AUX_I2C_REPLY_MASK) {
623
		case AUX_I2C_REPLY_ACK:
624
			if (mode == MODE_I2C_READ) {
625
				*read_byte = reply[1];
626
			}
627
			return reply_bytes - 1;
628
		case AUX_I2C_REPLY_NACK:
629
			DRM_DEBUG_KMS("aux_i2c nack\n");
630
			return -EREMOTEIO;
631
		case AUX_I2C_REPLY_DEFER:
632
			DRM_DEBUG_KMS("aux_i2c defer\n");
633
			udelay(100);
634
			break;
635
		default:
636
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
637
			return -EREMOTEIO;
638
		}
639
	}
640
 
641
	DRM_ERROR("too many retries, giving up\n");
642
	return -EREMOTEIO;
643
}
644
 
2342 Serge 645
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
646
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
647
 
2330 Serge 648
static int
649
intel_dp_i2c_init(struct intel_dp *intel_dp,
650
		  struct intel_connector *intel_connector, const char *name)
651
{
2342 Serge 652
	int	ret;
653
 
2330 Serge 654
	DRM_DEBUG_KMS("i2c_init %s\n", name);
655
	intel_dp->algo.running = false;
656
	intel_dp->algo.address = 0;
657
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
658
 
2342 Serge 659
	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
3031 serge 660
	intel_dp->adapter.owner = THIS_MODULE;
2330 Serge 661
	intel_dp->adapter.class = I2C_CLASS_DDC;
2342 Serge 662
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
2330 Serge 663
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
664
	intel_dp->adapter.algo_data = &intel_dp->algo;
665
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
666
 
2342 Serge 667
	ironlake_edp_panel_vdd_on(intel_dp);
668
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
669
	ironlake_edp_panel_vdd_off(intel_dp, false);
670
	return ret;
2330 Serge 671
}
672
 
673
static bool
3031 serge 674
intel_dp_mode_fixup(struct drm_encoder *encoder,
675
		    const struct drm_display_mode *mode,
2330 Serge 676
		    struct drm_display_mode *adjusted_mode)
677
{
678
	struct drm_device *dev = encoder->dev;
679
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
680
	int lane_count, clock;
681
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
682
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
3031 serge 683
	int bpp, mode_rate;
2330 Serge 684
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
685
 
2342 Serge 686
	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
687
		intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
2330 Serge 688
		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
689
					mode, adjusted_mode);
690
	}
691
 
3031 serge 692
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
693
		return false;
694
 
695
	DRM_DEBUG_KMS("DP link computation with max lane count %i "
696
		      "max bw %02x pixel clock %iKHz\n",
697
		      max_lane_count, bws[max_clock], adjusted_mode->clock);
698
 
699
	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
700
		return false;
701
 
702
	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
703
	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
704
 
705
	for (clock = 0; clock <= max_clock; clock++) {
2330 Serge 706
	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
707
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
708
 
3031 serge 709
			if (mode_rate <= link_avail) {
2330 Serge 710
				intel_dp->link_bw = bws[clock];
711
				intel_dp->lane_count = lane_count;
712
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
3031 serge 713
				DRM_DEBUG_KMS("DP link bw %02x lane "
714
						"count %d clock %d bpp %d\n",
2330 Serge 715
				       intel_dp->link_bw, intel_dp->lane_count,
3031 serge 716
				       adjusted_mode->clock, bpp);
717
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
718
					      mode_rate, link_avail);
2330 Serge 719
				return true;
720
			}
721
		}
722
	}
723
 
724
	return false;
725
}
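
/*
 * Note that the search above iterates link bandwidth in the outer loop
 * and lane count in the inner one, so the first configuration that
 * carries mode_rate is also the cheapest: the lowest link rate, and
 * within it the fewest lanes.
 */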
726
 
2327 Serge 727
struct intel_dp_m_n {
728
	uint32_t	tu;
729
	uint32_t	gmch_m;
730
	uint32_t	gmch_n;
731
	uint32_t	link_m;
732
	uint32_t	link_n;
733
};
734
 
735
static void
736
intel_reduce_ratio(uint32_t *num, uint32_t *den)
737
{
738
	while (*num > 0xffffff || *den > 0xffffff) {
739
		*num >>= 1;
740
		*den >>= 1;
741
	}
742
}
743
 
744
static void
745
intel_dp_compute_m_n(int bpp,
746
		     int nlanes,
747
		     int pixel_clock,
748
		     int link_clock,
749
		     struct intel_dp_m_n *m_n)
750
{
751
	m_n->tu = 64;
752
	m_n->gmch_m = (pixel_clock * bpp) >> 3;
753
	m_n->gmch_n = link_clock * nlanes;
754
	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
755
	m_n->link_m = pixel_clock;
756
	m_n->link_n = link_clock;
757
	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
758
}
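
/*
 * Illustrative example: 24 bpp over four 2.7 GHz lanes at a 148500 kHz
 * pixel clock gives gmch_m = (148500 * 24) >> 3 = 445500 and
 * gmch_n = 270000 * 4 = 1080000; both already fit in 24 bits, so
 * intel_reduce_ratio() leaves the ratio untouched.
 */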
759
 
760
void
761
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
762
         struct drm_display_mode *adjusted_mode)
763
{
764
    struct drm_device *dev = crtc->dev;
3031 serge 765
	struct intel_encoder *encoder;
2327 Serge 766
    struct drm_i915_private *dev_priv = dev->dev_private;
767
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
768
    int lane_count = 4;
769
    struct intel_dp_m_n m_n;
770
    int pipe = intel_crtc->pipe;
771
 
772
    /*
773
     * Find the lane count in the intel_encoder private
774
     */
3031 serge 775
	for_each_encoder_on_crtc(dev, crtc, encoder) {
776
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2327 Serge 777
 
2342 Serge 778
		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
779
		    intel_dp->base.type == INTEL_OUTPUT_EDP)
780
		{
2327 Serge 781
            lane_count = intel_dp->lane_count;
782
            break;
783
        }
784
    }
785
 
786
    /*
787
     * Compute the GMCH and Link ratios. The '3' here is
788
     * the number of bytes_per_pixel post-LUT, which we always
789
     * set up for 8-bits of R/G/B, or 3 bytes total.
790
     */
791
    intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
792
                 mode->clock, adjusted_mode->clock, &m_n);
793
 
794
    if (HAS_PCH_SPLIT(dev)) {
795
        I915_WRITE(TRANSDATA_M1(pipe),
796
               ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
797
               m_n.gmch_m);
798
        I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
799
        I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
800
        I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
801
    } else {
802
        I915_WRITE(PIPE_GMCH_DATA_M(pipe),
803
               ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
804
               m_n.gmch_m);
805
        I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
806
        I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
807
        I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
808
    }
809
}
810
 
2330 Serge 811
static void
812
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
813
		  struct drm_display_mode *adjusted_mode)
814
{
815
	struct drm_device *dev = encoder->dev;
2342 Serge 816
	struct drm_i915_private *dev_priv = dev->dev_private;
2330 Serge 817
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
818
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
819
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2327 Serge 820
 
2342 Serge 821
	/*
822
	 * There are four kinds of DP registers:
823
	 *
824
	 * 	IBX PCH
825
	 * 	SNB CPU
826
	 *	IVB CPU
827
	 * 	CPT PCH
828
	 *
829
	 * IBX PCH and CPU are the same for almost everything,
830
	 * except that the CPU DP PLL is configured in this
831
	 * register
832
	 *
833
	 * CPT PCH is quite different, having many bits moved
834
	 * to the TRANS_DP_CTL register instead. That
835
	 * configuration happens (oddly) in ironlake_pch_enable
836
	 */
2327 Serge 837
 
2342 Serge 838
	/* Preserve the BIOS-computed detected bit. This is
839
	 * supposed to be read-only.
840
	 */
841
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
2327 Serge 842
 
2342 Serge 843
	/* Handle DP bits in common between all three register formats */
844
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
845
 
2330 Serge 846
	switch (intel_dp->lane_count) {
847
	case 1:
848
		intel_dp->DP |= DP_PORT_WIDTH_1;
849
		break;
850
	case 2:
851
		intel_dp->DP |= DP_PORT_WIDTH_2;
852
		break;
853
	case 4:
854
		intel_dp->DP |= DP_PORT_WIDTH_4;
855
		break;
856
	}
2342 Serge 857
	if (intel_dp->has_audio) {
858
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
859
				 pipe_name(intel_crtc->pipe));
2330 Serge 860
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2342 Serge 861
		intel_write_eld(encoder, adjusted_mode);
862
	}
2330 Serge 863
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
864
	intel_dp->link_configuration[0] = intel_dp->link_bw;
865
	intel_dp->link_configuration[1] = intel_dp->lane_count;
866
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
867
	/*
868
	 * Check for DPCD version > 1.1 and enhanced framing support
869
	 */
870
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
871
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
872
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
873
	}
2327 Serge 874
 
2342 Serge 875
	/* Split out the IBX/CPU vs CPT settings */
876
 
877
	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
878
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
879
			intel_dp->DP |= DP_SYNC_HS_HIGH;
880
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
881
			intel_dp->DP |= DP_SYNC_VS_HIGH;
882
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
883
 
884
		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
885
			intel_dp->DP |= DP_ENHANCED_FRAMING;
886
 
887
		intel_dp->DP |= intel_crtc->pipe << 29;
888
 
889
		/* don't miss out required setting for eDP */
890
		if (adjusted_mode->clock < 200000)
891
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
892
		else
893
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
894
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
895
		intel_dp->DP |= intel_dp->color_range;
896
 
897
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
898
			intel_dp->DP |= DP_SYNC_HS_HIGH;
899
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
900
			intel_dp->DP |= DP_SYNC_VS_HIGH;
901
		intel_dp->DP |= DP_LINK_TRAIN_OFF;
902
 
903
		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
904
			intel_dp->DP |= DP_ENHANCED_FRAMING;
905
 
906
		if (intel_crtc->pipe == 1)
2330 Serge 907
			intel_dp->DP |= DP_PIPEB_SELECT;
2327 Serge 908
 
2342 Serge 909
		if (is_cpu_edp(intel_dp)) {
2330 Serge 910
			/* don't miss out required setting for eDP */
911
			if (adjusted_mode->clock < 200000)
912
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
913
			else
914
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
915
		}
2342 Serge 916
	} else {
917
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
918
	}
2330 Serge 919
}
2327 Serge 920
 
2342 Serge 921
#define IDLE_ON_MASK		(PP_ON | 0 	  | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
922
#define IDLE_ON_VALUE   	(PP_ON | 0 	  | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
923
 
924
#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
925
#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
926
 
927
#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
928
#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
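
/* These mask/value pairs are polled against PCH_PP_STATUS by
 * ironlake_wait_panel_status() below: "on" waits for PP_ON with an idle
 * sequencer, "off" for the panel to be fully off, and "cycle"
 * additionally for the power-cycle delay to expire.
 */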
929
 
930
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
931
				       u32 mask,
932
				       u32 value)
933
{
934
	struct drm_device *dev = intel_dp->base.base.dev;
935
	struct drm_i915_private *dev_priv = dev->dev_private;
936
 
937
	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
938
		      mask, value,
939
		      I915_READ(PCH_PP_STATUS),
940
		      I915_READ(PCH_PP_CONTROL));
941
 
942
	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
943
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
944
			  I915_READ(PCH_PP_STATUS),
945
			  I915_READ(PCH_PP_CONTROL));
946
	}
947
}
948
 
949
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
950
{
951
	DRM_DEBUG_KMS("Wait for panel power on\n");
952
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
953
}
954
 
955
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
956
{
957
	DRM_DEBUG_KMS("Wait for panel power off time\n");
958
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
959
}
960
 
961
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
962
{
963
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
964
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
965
}
966
 
967
 
968
/* Read the current pp_control value, unlocking the register if it
969
 * is locked
970
 */
971
 
972
static  u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
973
{
974
	u32	control = I915_READ(PCH_PP_CONTROL);
975
 
976
	control &= ~PANEL_UNLOCK_MASK;
977
	control |= PANEL_UNLOCK_REGS;
978
	return control;
979
}
980
 
2330 Serge 981
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
982
{
983
	struct drm_device *dev = intel_dp->base.base.dev;
984
	struct drm_i915_private *dev_priv = dev->dev_private;
985
	u32 pp;
2327 Serge 986
 
2342 Serge 987
	if (!is_edp(intel_dp))
988
		return;
989
	DRM_DEBUG_KMS("Turn eDP VDD on\n");
2327 Serge 990
 
2342 Serge 991
	WARN(intel_dp->want_panel_vdd,
992
	     "eDP VDD already requested on\n");
993
 
994
	intel_dp->want_panel_vdd = true;
995
 
996
	if (ironlake_edp_have_panel_vdd(intel_dp)) {
997
		DRM_DEBUG_KMS("eDP VDD already on\n");
998
		return;
999
	}
1000
 
1001
	if (!ironlake_edp_have_panel_power(intel_dp))
1002
		ironlake_wait_panel_power_cycle(intel_dp);
1003
 
1004
	pp = ironlake_get_pp_control(dev_priv);
2330 Serge 1005
	pp |= EDP_FORCE_VDD;
1006
	I915_WRITE(PCH_PP_CONTROL, pp);
1007
	POSTING_READ(PCH_PP_CONTROL);
2342 Serge 1008
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
1009
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
1010
 
1011
	/*
1012
	 * If the panel wasn't on, delay before accessing aux channel
1013
	 */
1014
	if (!ironlake_edp_have_panel_power(intel_dp)) {
1015
		DRM_DEBUG_KMS("eDP was not running\n");
1016
		msleep(intel_dp->panel_power_up_delay);
1017
	}
2330 Serge 1018
}
2327 Serge 1019
 
2342 Serge 1020
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
2330 Serge 1021
{
1022
	struct drm_device *dev = intel_dp->base.base.dev;
1023
	struct drm_i915_private *dev_priv = dev->dev_private;
1024
	u32 pp;
2327 Serge 1025
 
2342 Serge 1026
	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
1027
		pp = ironlake_get_pp_control(dev_priv);
2330 Serge 1028
		pp &= ~EDP_FORCE_VDD;
1029
		I915_WRITE(PCH_PP_CONTROL, pp);
1030
		POSTING_READ(PCH_PP_CONTROL);
2327 Serge 1031
 
2330 Serge 1032
		/* Make sure sequencer is idle before allowing subsequent activity */
2342 Serge 1033
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
1034
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
1035
 
1036
		msleep(intel_dp->panel_power_down_delay);
1037
	}
2330 Serge 1038
}
2327 Serge 1039
 
2342 Serge 1040
 
1041
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2330 Serge 1042
{
2342 Serge 1043
	if (!is_edp(intel_dp))
1044
		return;
1045
 
1046
	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
1047
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
1048
 
1049
	intel_dp->want_panel_vdd = false;
1050
 
1051
	if (sync) {
1052
		ironlake_panel_vdd_off_sync(intel_dp);
1053
	} else {
1054
		/*
1055
		 * Queue the timer to fire a long
1056
		 * time from now (relative to the power down delay)
1057
		 * to keep the panel power up across a sequence of operations
1058
		 */
1059
//       schedule_delayed_work(&intel_dp->panel_vdd_work,
1060
//                     msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
1061
	}
1062
}
1063
 
1064
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1065
{
2330 Serge 1066
	struct drm_device *dev = intel_dp->base.base.dev;
1067
	struct drm_i915_private *dev_priv = dev->dev_private;
2342 Serge 1068
	u32 pp;
2327 Serge 1069
 
2342 Serge 1070
	if (!is_edp(intel_dp))
1071
		return;
2327 Serge 1072
 
2342 Serge 1073
	DRM_DEBUG_KMS("Turn eDP power on\n");
2327 Serge 1074
 
2342 Serge 1075
	if (ironlake_edp_have_panel_power(intel_dp)) {
1076
		DRM_DEBUG_KMS("eDP power already on\n");
1077
		return;
1078
	}
1079
 
1080
	ironlake_wait_panel_power_cycle(intel_dp);
1081
 
1082
	pp = ironlake_get_pp_control(dev_priv);
1083
	if (IS_GEN5(dev)) {
2330 Serge 1084
	/* ILK workaround: disable reset around power sequence */
1085
	pp &= ~PANEL_POWER_RESET;
1086
	I915_WRITE(PCH_PP_CONTROL, pp);
1087
	POSTING_READ(PCH_PP_CONTROL);
2342 Serge 1088
	}
2327 Serge 1089
 
2342 Serge 1090
	pp |= POWER_TARGET_ON;
1091
	if (!IS_GEN5(dev))
1092
		pp |= PANEL_POWER_RESET;
1093
 
2330 Serge 1094
	I915_WRITE(PCH_PP_CONTROL, pp);
1095
	POSTING_READ(PCH_PP_CONTROL);
2327 Serge 1096
 
2342 Serge 1097
	ironlake_wait_panel_on(intel_dp);
2327 Serge 1098
 
2342 Serge 1099
	if (IS_GEN5(dev)) {
2330 Serge 1100
	pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1101
	I915_WRITE(PCH_PP_CONTROL, pp);
1102
	POSTING_READ(PCH_PP_CONTROL);
2342 Serge 1103
	}
2330 Serge 1104
}
2327 Serge 1105
 
2342 Serge 1106
static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
2330 Serge 1107
{
2342 Serge 1108
	struct drm_device *dev = intel_dp->base.base.dev;
2330 Serge 1109
	struct drm_i915_private *dev_priv = dev->dev_private;
2342 Serge 1110
	u32 pp;
2327 Serge 1111
 
2342 Serge 1112
	if (!is_edp(intel_dp))
1113
		return;
2327 Serge 1114
 
2342 Serge 1115
	DRM_DEBUG_KMS("Turn eDP power off\n");
2327 Serge 1116
 
3031 serge 1117
	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
2342 Serge 1118
 
1119
	pp = ironlake_get_pp_control(dev_priv);
3031 serge 1120
	/* We need to switch off panel power _and_ force vdd, for otherwise some
1121
	 * panels get very unhappy and cease to work. */
2342 Serge 1122
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
2330 Serge 1123
	I915_WRITE(PCH_PP_CONTROL, pp);
1124
	POSTING_READ(PCH_PP_CONTROL);
2327 Serge 1125
 
3031 serge 1126
	intel_dp->want_panel_vdd = false;
1127
 
2342 Serge 1128
	ironlake_wait_panel_off(intel_dp);
2330 Serge 1129
}
2327 Serge 1130
 
2342 Serge 1131
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
2330 Serge 1132
{
2342 Serge 1133
	struct drm_device *dev = intel_dp->base.base.dev;
2330 Serge 1134
	struct drm_i915_private *dev_priv = dev->dev_private;
1135
	u32 pp;
2327 Serge 1136
 
2342 Serge 1137
	if (!is_edp(intel_dp))
1138
		return;
1139
 
2330 Serge 1140
	DRM_DEBUG_KMS("\n");
1141
	/*
1142
	 * If we enable the backlight right away following a panel power
1143
	 * on, we may see slight flicker as the panel syncs with the eDP
1144
	 * link.  So delay a bit to make sure the image is solid before
1145
	 * allowing it to appear.
1146
	 */
2342 Serge 1147
	msleep(intel_dp->backlight_on_delay);
1148
	pp = ironlake_get_pp_control(dev_priv);
2330 Serge 1149
	pp |= EDP_BLC_ENABLE;
1150
	I915_WRITE(PCH_PP_CONTROL, pp);
2342 Serge 1151
	POSTING_READ(PCH_PP_CONTROL);
2330 Serge 1152
}
2327 Serge 1153
 
2342 Serge 1154
static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
2330 Serge 1155
{
2342 Serge 1156
	struct drm_device *dev = intel_dp->base.base.dev;
2330 Serge 1157
	struct drm_i915_private *dev_priv = dev->dev_private;
1158
	u32 pp;
2327 Serge 1159
 
2342 Serge 1160
	if (!is_edp(intel_dp))
1161
		return;
1162
 
2330 Serge 1163
	DRM_DEBUG_KMS("\n");
2342 Serge 1164
	pp = ironlake_get_pp_control(dev_priv);
2330 Serge 1165
	pp &= ~EDP_BLC_ENABLE;
1166
	I915_WRITE(PCH_PP_CONTROL, pp);
2342 Serge 1167
	POSTING_READ(PCH_PP_CONTROL);
1168
	msleep(intel_dp->backlight_off_delay);
2330 Serge 1169
}
2327 Serge 1170
 
3031 serge 1171
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2330 Serge 1172
{
3031 serge 1173
	struct drm_device *dev = intel_dp->base.base.dev;
1174
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
2330 Serge 1175
	struct drm_i915_private *dev_priv = dev->dev_private;
1176
	u32 dpa_ctl;
2327 Serge 1177
 
3031 serge 1178
	assert_pipe_disabled(dev_priv,
1179
			     to_intel_crtc(crtc)->pipe);
1180
 
2330 Serge 1181
	DRM_DEBUG_KMS("\n");
1182
	dpa_ctl = I915_READ(DP_A);
3031 serge 1183
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
1184
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1185
 
1186
	/* We don't adjust intel_dp->DP while tearing down the link, to
1187
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
1188
	 * enable bits here to ensure that we don't enable too much. */
1189
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
1190
	intel_dp->DP |= DP_PLL_ENABLE;
1191
	I915_WRITE(DP_A, intel_dp->DP);
2330 Serge 1192
	POSTING_READ(DP_A);
1193
	udelay(200);
1194
}
2327 Serge 1195
 
3031 serge 1196
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2330 Serge 1197
{
3031 serge 1198
	struct drm_device *dev = intel_dp->base.base.dev;
1199
	struct drm_crtc *crtc = intel_dp->base.base.crtc;
2330 Serge 1200
	struct drm_i915_private *dev_priv = dev->dev_private;
1201
	u32 dpa_ctl;
2327 Serge 1202
 
3031 serge 1203
	assert_pipe_disabled(dev_priv,
1204
			     to_intel_crtc(crtc)->pipe);
1205
 
2330 Serge 1206
	dpa_ctl = I915_READ(DP_A);
3031 serge 1207
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
1208
	     "dp pll off, should be on\n");
1209
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1210
 
1211
	/* We can't rely on the value tracked for the DP register in
1212
	 * intel_dp->DP because link_down must not change that (otherwise link
1213
	 * re-training will fail). */
2330 Serge 1214
	dpa_ctl &= ~DP_PLL_ENABLE;
1215
	I915_WRITE(DP_A, dpa_ctl);
1216
	POSTING_READ(DP_A);
1217
	udelay(200);
1218
}
2327 Serge 1219
 
2330 Serge 1220
/* If the sink supports it, try to set the power state appropriately */
1221
static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1222
{
1223
	int ret, i;
2327 Serge 1224
 
2330 Serge 1225
	/* Should have a valid DPCD by this point */
1226
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1227
		return;
2327 Serge 1228
 
2330 Serge 1229
	if (mode != DRM_MODE_DPMS_ON) {
1230
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
1231
						  DP_SET_POWER_D3);
1232
		if (ret != 1)
1233
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
1234
	} else {
1235
		/*
1236
		 * When turning on, we need to retry for 1ms to give the sink
1237
		 * time to wake up.
1238
		 */
1239
		for (i = 0; i < 3; i++) {
1240
			ret = intel_dp_aux_native_write_1(intel_dp,
1241
							  DP_SET_POWER,
1242
							  DP_SET_POWER_D0);
1243
			if (ret == 1)
1244
				break;
1245
			msleep(1);
1246
		}
1247
	}
1248
}
2327 Serge 1249
 
3031 serge 1250
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1251
				  enum pipe *pipe)
2330 Serge 1252
{
3031 serge 1253
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1254
	struct drm_device *dev = encoder->base.dev;
1255
	struct drm_i915_private *dev_priv = dev->dev_private;
1256
	u32 tmp = I915_READ(intel_dp->output_reg);
2327 Serge 1257
 
3031 serge 1258
	if (!(tmp & DP_PORT_EN))
1259
		return false;
2342 Serge 1260
 
3031 serge 1261
	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
1262
		*pipe = PORT_TO_PIPE_CPT(tmp);
1263
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
1264
		*pipe = PORT_TO_PIPE(tmp);
1265
	} else {
1266
		u32 trans_sel;
1267
		u32 trans_dp;
1268
		int i;
2327 Serge 1269
 
3031 serge 1270
		switch (intel_dp->output_reg) {
1271
		case PCH_DP_B:
1272
			trans_sel = TRANS_DP_PORT_SEL_B;
1273
			break;
1274
		case PCH_DP_C:
1275
			trans_sel = TRANS_DP_PORT_SEL_C;
1276
			break;
1277
		case PCH_DP_D:
1278
			trans_sel = TRANS_DP_PORT_SEL_D;
1279
			break;
1280
		default:
1281
			return true;
1282
		}
1283
 
1284
		for_each_pipe(i) {
1285
			trans_dp = I915_READ(TRANS_DP_CTL(i));
1286
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1287
				*pipe = i;
1288
				return true;
1289
			}
1290
		}
1291
	}
1292
 
1293
	DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
1294
 
1295
	return true;
2330 Serge 1296
}
2327 Serge 1297
 
3031 serge 1298
static void intel_disable_dp(struct intel_encoder *encoder)
2330 Serge 1299
{
3031 serge 1300
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2327 Serge 1301
 
3031 serge 1302
	/* Make sure the panel is off before trying to change the mode. But also
1303
	 * ensure that we have vdd while we switch off the panel. */
2330 Serge 1304
	ironlake_edp_panel_vdd_on(intel_dp);
3031 serge 1305
	ironlake_edp_backlight_off(intel_dp);
2342 Serge 1306
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3031 serge 1307
	ironlake_edp_panel_off(intel_dp);
2330 Serge 1308
 
3031 serge 1309
	/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
1310
	if (!is_cpu_edp(intel_dp))
1311
		intel_dp_link_down(intel_dp);
1312
}
2330 Serge 1313
 
3031 serge 1314
static void intel_post_disable_dp(struct intel_encoder *encoder)
1315
{
1316
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1317
 
1318
	if (is_cpu_edp(intel_dp)) {
1319
		intel_dp_link_down(intel_dp);
1320
		ironlake_edp_pll_off(intel_dp);
1321
	}
2330 Serge 1322
}
1323
 
3031 serge 1324
static void intel_enable_dp(struct intel_encoder *encoder)
2330 Serge 1325
{
3031 serge 1326
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1327
	struct drm_device *dev = encoder->base.dev;
2330 Serge 1328
	struct drm_i915_private *dev_priv = dev->dev_private;
1329
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1330
 
3031 serge 1331
	if (WARN_ON(dp_reg & DP_PORT_EN))
1332
		return;
2342 Serge 1333
 
1334
		ironlake_edp_panel_vdd_on(intel_dp);
3031 serge 1335
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2330 Serge 1336
			intel_dp_start_link_train(intel_dp);
1337
				ironlake_edp_panel_on(intel_dp);
2342 Serge 1338
			ironlake_edp_panel_vdd_off(intel_dp, true);
2330 Serge 1339
			intel_dp_complete_link_train(intel_dp);
2342 Serge 1340
		ironlake_edp_backlight_on(intel_dp);
2330 Serge 1341
}
1342
 
3031 serge 1343
static void intel_pre_enable_dp(struct intel_encoder *encoder)
1344
{
1345
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1346
 
1347
	if (is_cpu_edp(intel_dp))
1348
		ironlake_edp_pll_on(intel_dp);
1349
}
1350
 
2330 Serge 1351
/*
1352
 * Native read with retry for link status and receiver capability reads for
1353
 * cases where the sink may still be asleep.
1354
 */
1355
static bool
1356
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1357
			       uint8_t *recv, int recv_bytes)
1358
{
1359
	int ret, i;
1360
 
1361
	/*
1362
	 * Sinks are *supposed* to come up within 1ms from an off state,
1363
	 * but we're also supposed to retry 3 times per the spec.
1364
	 */
1365
	for (i = 0; i < 3; i++) {
1366
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
1367
					       recv_bytes);
1368
		if (ret == recv_bytes)
1369
			return true;
1370
		msleep(1);
1371
	}
1372
 
1373
	return false;
1374
}
1375
 
1376
/*
1377
 * Fetch AUX CH registers 0x202 - 0x207 which contain
1378
 * link status information
1379
 */
1380
static bool
2342 Serge 1381
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2330 Serge 1382
{
1383
	return intel_dp_aux_native_read_retry(intel_dp,
1384
					      DP_LANE0_1_STATUS,
2342 Serge 1385
					      link_status,
2330 Serge 1386
					      DP_LINK_STATUS_SIZE);
1387
}
1388
 
1389
static uint8_t
1390
intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1391
		     int r)
1392
{
1393
	return link_status[r - DP_LANE0_1_STATUS];
1394
}
1395
 
1396
static uint8_t
2342 Serge 1397
intel_get_adjust_request_voltage(uint8_t adjust_request[2],
2330 Serge 1398
				 int lane)
1399
{
1400
	int	    s = ((lane & 1) ?
1401
			 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
1402
			 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
2342 Serge 1403
	uint8_t l = adjust_request[lane>>1];
2330 Serge 1404
 
1405
	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
1406
}
1407
 
1408
static uint8_t
2342 Serge 1409
intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
2330 Serge 1410
				      int lane)
1411
{
1412
	int	    s = ((lane & 1) ?
1413
			 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
1414
			 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
2342 Serge 1415
	uint8_t l = adjust_request[lane>>1];
2330 Serge 1416
 
1417
	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1418
}
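
/*
 * Each DPCD ADJUST_REQUEST byte covers two lanes: two bits of requested
 * voltage swing and two bits of pre-emphasis for the even lane in the
 * low nibble, and the same pair for the odd lane in the high nibble --
 * hence the adjust_request[lane >> 1] indexing in both helpers above.
 */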
1419
 
1420
 
1421
#if 0
1422
static char	*voltage_names[] = {
1423
	"0.4V", "0.6V", "0.8V", "1.2V"
1424
};
1425
static char	*pre_emph_names[] = {
1426
	"0dB", "3.5dB", "6dB", "9.5dB"
1427
};
1428
static char	*link_train_names[] = {
1429
	"pattern 1", "pattern 2", "idle", "off"
1430
};
1431
#endif
1432
 
1433
/*
1434
 * These are source-specific values; current Intel hardware supports
1435
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1436
 */
1437
 
1438
static uint8_t
2342 Serge 1439
intel_dp_voltage_max(struct intel_dp *intel_dp)
2330 Serge 1440
{
2342 Serge 1441
	struct drm_device *dev = intel_dp->base.base.dev;
1442
 
1443
	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
1444
		return DP_TRAIN_VOLTAGE_SWING_800;
1445
	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1446
		return DP_TRAIN_VOLTAGE_SWING_1200;
1447
	else
1448
		return DP_TRAIN_VOLTAGE_SWING_800;
1449
}
1450
 
1451
static uint8_t
1452
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1453
{
1454
	struct drm_device *dev = intel_dp->base.base.dev;
1455
 
1456
	if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1457
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1458
		case DP_TRAIN_VOLTAGE_SWING_400:
1459
			return DP_TRAIN_PRE_EMPHASIS_6;
1460
		case DP_TRAIN_VOLTAGE_SWING_600:
1461
		case DP_TRAIN_VOLTAGE_SWING_800:
1462
			return DP_TRAIN_PRE_EMPHASIS_3_5;
1463
		default:
1464
			return DP_TRAIN_PRE_EMPHASIS_0;
1465
		}
1466
	} else {
2330 Serge 1467
	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1468
	case DP_TRAIN_VOLTAGE_SWING_400:
1469
		return DP_TRAIN_PRE_EMPHASIS_6;
1470
	case DP_TRAIN_VOLTAGE_SWING_600:
1471
		return DP_TRAIN_PRE_EMPHASIS_6;
1472
	case DP_TRAIN_VOLTAGE_SWING_800:
1473
		return DP_TRAIN_PRE_EMPHASIS_3_5;
1474
	case DP_TRAIN_VOLTAGE_SWING_1200:
1475
	default:
1476
		return DP_TRAIN_PRE_EMPHASIS_0;
1477
	}
2342 Serge 1478
	}
2330 Serge 1479
}
1480
 
1481
static void
2342 Serge 1482
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2330 Serge 1483
{
1484
	uint8_t v = 0;
1485
	uint8_t p = 0;
1486
	int lane;
2342 Serge 1487
	uint8_t	*adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
1488
	uint8_t voltage_max;
1489
	uint8_t preemph_max;
2330 Serge 1490
 
1491
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
2342 Serge 1492
		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
1493
		uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
2330 Serge 1494
 
1495
		if (this_v > v)
1496
			v = this_v;
1497
		if (this_p > p)
1498
			p = this_p;
1499
	}
1500
 
2342 Serge 1501
	voltage_max = intel_dp_voltage_max(intel_dp);
1502
	if (v >= voltage_max)
1503
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2330 Serge 1504
 
2342 Serge 1505
	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1506
	if (p >= preemph_max)
1507
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2330 Serge 1508
 
1509
	for (lane = 0; lane < 4; lane++)
1510
		intel_dp->train_set[lane] = v | p;
1511
}
1512
 
1513
static uint32_t
2342 Serge 1514
intel_dp_signal_levels(uint8_t train_set)
2330 Serge 1515
{
1516
	uint32_t	signal_levels = 0;
1517
 
1518
	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1519
	case DP_TRAIN_VOLTAGE_SWING_400:
1520
	default:
1521
		signal_levels |= DP_VOLTAGE_0_4;
1522
		break;
1523
	case DP_TRAIN_VOLTAGE_SWING_600:
1524
		signal_levels |= DP_VOLTAGE_0_6;
1525
		break;
1526
	case DP_TRAIN_VOLTAGE_SWING_800:
1527
		signal_levels |= DP_VOLTAGE_0_8;
1528
		break;
1529
	case DP_TRAIN_VOLTAGE_SWING_1200:
1530
		signal_levels |= DP_VOLTAGE_1_2;
1531
		break;
1532
	}
1533
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1534
	case DP_TRAIN_PRE_EMPHASIS_0:
1535
	default:
1536
		signal_levels |= DP_PRE_EMPHASIS_0;
1537
		break;
1538
	case DP_TRAIN_PRE_EMPHASIS_3_5:
1539
		signal_levels |= DP_PRE_EMPHASIS_3_5;
1540
		break;
1541
	case DP_TRAIN_PRE_EMPHASIS_6:
1542
		signal_levels |= DP_PRE_EMPHASIS_6;
1543
		break;
1544
	case DP_TRAIN_PRE_EMPHASIS_9_5:
1545
		signal_levels |= DP_PRE_EMPHASIS_9_5;
1546
		break;
1547
	}
1548
	return signal_levels;
1549
}
1550
 
1551
/* Gen6's DP voltage swing and pre-emphasis control */
1552
static uint32_t
1553
intel_gen6_edp_signal_levels(uint8_t train_set)
1554
{
1555
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1556
					 DP_TRAIN_PRE_EMPHASIS_MASK);
1557
	switch (signal_levels) {
1558
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1559
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1560
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1561
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1562
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
1563
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1564
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1565
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
1566
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1567
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1568
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
1569
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1570
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
1571
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
1572
	default:
1573
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1574
			      "0x%x\n", signal_levels);
1575
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1576
	}
1577
}
1578
 
2342 Serge 1579
/* Gen7's DP voltage swing and pre-emphasis control */
1580
static uint32_t
1581
intel_gen7_edp_signal_levels(uint8_t train_set)
1582
{
1583
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1584
					 DP_TRAIN_PRE_EMPHASIS_MASK);
1585
	switch (signal_levels) {
1586
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1587
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
1588
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1589
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
1590
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1591
		return EDP_LINK_TRAIN_400MV_6DB_IVB;
1592
 
1593
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1594
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
1595
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1596
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
1597
 
1598
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1599
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
1600
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1601
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
1602
 
1603
	default:
1604
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1605
			      "0x%x\n", signal_levels);
1606
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
1607
	}
1608
}
1609
 
2330 Serge 1610
static uint8_t
1611
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1612
		      int lane)
1613
{
1614
	int s = (lane & 1) * 4;
2342 Serge 1615
	uint8_t l = link_status[lane>>1];
2330 Serge 1616
 
1617
	return (l >> s) & 0xf;
1618
}
1619
 
1620
/* Check for clock recovery is done on all channels */
1621
static bool
1622
intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
1623
{
1624
	int lane;
1625
	uint8_t lane_status;
1626
 
1627
	for (lane = 0; lane < lane_count; lane++) {
1628
		lane_status = intel_get_lane_status(link_status, lane);
1629
		if ((lane_status & DP_LANE_CR_DONE) == 0)
1630
			return false;
1631
	}
1632
	return true;
1633
}
1634
 
1635
/* Check to see if channel eq is done on all channels */
1636
#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
1637
			 DP_LANE_CHANNEL_EQ_DONE|\
1638
			 DP_LANE_SYMBOL_LOCKED)
1639
static bool
2342 Serge 1640
intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2330 Serge 1641
{
1642
	uint8_t lane_align;
1643
	uint8_t lane_status;
1644
	int lane;
1645
 
2342 Serge 1646
	lane_align = intel_dp_link_status(link_status,
2330 Serge 1647
					  DP_LANE_ALIGN_STATUS_UPDATED);
1648
	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
1649
		return false;
1650
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
2342 Serge 1651
		lane_status = intel_get_lane_status(link_status, lane);
2330 Serge 1652
		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
1653
			return false;
1654
	}
1655
	return true;
1656
}
1657
 
1658
static bool
1659
intel_dp_set_link_train(struct intel_dp *intel_dp,
1660
			uint32_t dp_reg_value,
1661
			uint8_t dp_train_pat)
1662
{
1663
	struct drm_device *dev = intel_dp->base.base.dev;
1664
	struct drm_i915_private *dev_priv = dev->dev_private;
1665
	int ret;
1666
 
3031 serge 1667
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1668
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
1669
 
1670
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1671
		case DP_TRAINING_PATTERN_DISABLE:
1672
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
1673
			break;
1674
		case DP_TRAINING_PATTERN_1:
1675
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
1676
			break;
1677
		case DP_TRAINING_PATTERN_2:
1678
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1679
			break;
1680
		case DP_TRAINING_PATTERN_3:
1681
			DRM_ERROR("DP training pattern 3 not supported\n");
1682
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1683
			break;
1684
		}
1685
 
1686
	} else {
1687
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;
1688
 
1689
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1690
		case DP_TRAINING_PATTERN_DISABLE:
1691
			dp_reg_value |= DP_LINK_TRAIN_OFF;
1692
			break;
1693
		case DP_TRAINING_PATTERN_1:
1694
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
1695
			break;
1696
		case DP_TRAINING_PATTERN_2:
1697
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1698
			break;
1699
		case DP_TRAINING_PATTERN_3:
1700
			DRM_ERROR("DP training pattern 3 not supported\n");
1701
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1702
			break;
1703
		}
1704
	}
1705
 
2330 Serge 1706
	I915_WRITE(intel_dp->output_reg, dp_reg_value);
1707
	POSTING_READ(intel_dp->output_reg);
1708
 
1709
	intel_dp_aux_native_write_1(intel_dp,
1710
				    DP_TRAINING_PATTERN_SET,
1711
				    dp_train_pat);
1712
 
3031 serge 1713
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
1714
	    DP_TRAINING_PATTERN_DISABLE) {
2330 Serge 1715
		ret = intel_dp_aux_native_write(intel_dp,
1716
						DP_TRAINING_LANE0_SET,
2342 Serge 1717
						intel_dp->train_set,
1718
						intel_dp->lane_count);
1719
		if (ret != intel_dp->lane_count)
2330 Serge 1720
			return false;
3031 serge 1721
	}
2330 Serge 1722
 
1723
	return true;
1724
}
1725
 
1726
/* Enable corresponding port and start training pattern 1 */
1727
static void
1728
intel_dp_start_link_train(struct intel_dp *intel_dp)
1729
{
1730
	struct drm_device *dev = intel_dp->base.base.dev;
1731
	int i;
1732
	uint8_t voltage;
1733
	bool clock_recovery = false;
2342 Serge 1734
	int voltage_tries, loop_tries;
2330 Serge 1735
	uint32_t DP = intel_dp->DP;
1736
 
1737
	/* Write the link configuration data */
1738
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
1739
				  intel_dp->link_configuration,
1740
				  DP_LINK_CONFIGURATION_SIZE);
1741
 
1742
	DP |= DP_PORT_EN;
2342 Serge 1743
 
2330 Serge 1744
	memset(intel_dp->train_set, 0, 4);
1745
	voltage = 0xff;
2342 Serge 1746
	voltage_tries = 0;
1747
	loop_tries = 0;
2330 Serge 1748
	clock_recovery = false;
1749
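	/*
	 * Clock recovery loop: keep retrying with whatever drive settings
	 * the sink requests.  Give up once the same voltage swing has been
	 * tried 5 times, or after 5 complete restarts when every lane
	 * already reports maximum swing reached.
	 */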
	for (;;) {
1750
		/* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */
2342 Serge 1751
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
2330 Serge 1752
		uint32_t    signal_levels;
2342 Serge 1753
 
1754
 
1755
		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1756
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1757
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1758
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
2330 Serge 1759
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1760
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1761
		} else {
2342 Serge 1762
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1763
			DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
2330 Serge 1764
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1765
		}
1766
 
3031 serge 1767
		if (!intel_dp_set_link_train(intel_dp, DP,
2330 Serge 1768
					     DP_TRAINING_PATTERN_1 |
1769
					     DP_LINK_SCRAMBLING_DISABLE))
1770
			break;
1771
		/* Training pattern 1 is now active; give the sink time to lock before polling its link status. */
1772
 
1773
		udelay(100);
2342 Serge 1774
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
1775
			DRM_ERROR("failed to get link status\n");
2330 Serge 1776
			break;
2342 Serge 1777
		}
2330 Serge 1778
 
2342 Serge 1779
		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1780
			DRM_DEBUG_KMS("clock recovery OK\n");
2330 Serge 1781
			clock_recovery = true;
1782
			break;
1783
		}
1784
 
1785
		/* Check to see if we've tried the max voltage */
1786
		for (i = 0; i < intel_dp->lane_count; i++)
1787
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1788
				break;
3031 serge 1789
		if (i == intel_dp->lane_count && voltage_tries == 5) {
2342 Serge 1790
			++loop_tries;
1791
			if (loop_tries == 5) {
1792
				DRM_DEBUG_KMS("too many full retries, give up\n");
2330 Serge 1793
				break;
2342 Serge 1794
			}
1795
			memset(intel_dp->train_set, 0, 4);
1796
			voltage_tries = 0;
1797
			continue;
1798
		}
2330 Serge 1799
 
1800
		/* Check to see if we've tried the same voltage 5 times */
1801
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
2342 Serge 1802
			++voltage_tries;
1803
			if (voltage_tries == 5) {
1804
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
2330 Serge 1805
				break;
2342 Serge 1806
			}
2330 Serge 1807
		} else
2342 Serge 1808
			voltage_tries = 0;
2330 Serge 1809
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1810
 
1811
		/* Compute new intel_dp->train_set as requested by target */
2342 Serge 1812
		intel_get_adjust_train(intel_dp, link_status);
2330 Serge 1813
	}
1814
 
1815
	intel_dp->DP = DP;
1816
}
1817
 
1818
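/*
 * Channel equalization: drive training pattern 2 and apply the sink's
 * requested adjustments.  If clock recovery is lost, or equalization
 * hasn't converged after 5 attempts, fall back to a fresh clock
 * recovery pass; abort entirely after too many such fallbacks.
 */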
static void
1819
intel_dp_complete_link_train(struct intel_dp *intel_dp)
1820
{
1821
	struct drm_device *dev = intel_dp->base.base.dev;
1822
	bool channel_eq = false;
1823
	int tries, cr_tries;
1824
	uint32_t DP = intel_dp->DP;
1825
 
1826
	/* channel equalization */
1827
	tries = 0;
1828
	cr_tries = 0;
1829
	channel_eq = false;
1830
	for (;;) {
1831
		/* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */
1832
		uint32_t    signal_levels;
2342 Serge 1833
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
2330 Serge 1834
 
1835
		if (cr_tries > 5) {
1836
			DRM_ERROR("failed to train DP, aborting\n");
1837
			intel_dp_link_down(intel_dp);
1838
			break;
1839
		}
1840
 
2342 Serge 1841
		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1842
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1843
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1844
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
2330 Serge 1845
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1846
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1847
		} else {
2342 Serge 1848
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
2330 Serge 1849
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1850
		}
1851
 
1852
		/* channel eq pattern */
3031 serge 1853
		if (!intel_dp_set_link_train(intel_dp, DP,
2330 Serge 1854
					     DP_TRAINING_PATTERN_2 |
1855
					     DP_LINK_SCRAMBLING_DISABLE))
1856
			break;
1857
 
1858
		udelay(400);
2342 Serge 1859
		if (!intel_dp_get_link_status(intel_dp, link_status))
2330 Serge 1860
			break;
1861
 
1862
		/* Make sure clock is still ok */
2342 Serge 1863
		if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2330 Serge 1864
			intel_dp_start_link_train(intel_dp);
1865
			cr_tries++;
1866
			continue;
1867
		}
1868
 
2342 Serge 1869
		if (intel_channel_eq_ok(intel_dp, link_status)) {
2330 Serge 1870
			channel_eq = true;
1871
			break;
1872
		}
1873
 
1874
		/* Try 5 times, then try clock recovery if that fails */
1875
		if (tries > 5) {
1876
			intel_dp_link_down(intel_dp);
1877
			intel_dp_start_link_train(intel_dp);
1878
			tries = 0;
1879
			cr_tries++;
1880
			continue;
1881
		}
1882
 
1883
		/* Compute new intel_dp->train_set as requested by target */
2342 Serge 1884
		intel_get_adjust_train(intel_dp, link_status);
2330 Serge 1885
		++tries;
1886
	}
1887
 
3031 serge 1888
	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
2330 Serge 1889
}
1890
 
1891
static void
1892
intel_dp_link_down(struct intel_dp *intel_dp)
1893
{
1894
	struct drm_device *dev = intel_dp->base.base.dev;
1895
	struct drm_i915_private *dev_priv = dev->dev_private;
1896
	uint32_t DP = intel_dp->DP;
1897
 
3031 serge 1898
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
2330 Serge 1899
		return;
1900
 
1901
	DRM_DEBUG_KMS("\n");
1902
 
2342 Serge 1903
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
2330 Serge 1904
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
1905
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
1906
	} else {
1907
		DP &= ~DP_LINK_TRAIN_MASK;
1908
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
1909
	}
1910
	POSTING_READ(intel_dp->output_reg);
1911
 
1912
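	/* Let the idle pattern go out on the link; 17ms is roughly one frame
	 * at 60Hz. */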
	msleep(17);
1913
 
3031 serge 1914
	if (HAS_PCH_IBX(dev) &&
2330 Serge 1915
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
1916
		struct drm_crtc *crtc = intel_dp->base.base.crtc;
1917
 
1918
		/* Hardware workaround: leaving our transcoder select
1919
		 * set to transcoder B while it's off will prevent the
1920
		 * corresponding HDMI output on transcoder A.
1921
		 *
1922
		 * Combine this with another hardware workaround:
1923
		 * transcoder select bit can only be cleared while the
1924
		 * port is enabled.
1925
		 */
1926
		DP &= ~DP_PIPEB_SELECT;
1927
		I915_WRITE(intel_dp->output_reg, DP);
1928
 
1929
		/* Changes to enable or select take place the vblank
1930
		 * after being written.
1931
		 */
1932
		if (crtc == NULL) {
1933
			/* We can arrive here never having been attached
1934
			 * to a CRTC, for instance, due to inheriting
1935
			 * random state from the BIOS.
1936
			 *
1937
			 * If the pipe is not running, play safe and
1938
			 * wait for the clocks to stabilise before
1939
			 * continuing.
1940
			 */
1941
			POSTING_READ(intel_dp->output_reg);
1942
			msleep(50);
1943
		} else
1944
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
1945
	}
1946
 
2342 Serge 1947
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
2330 Serge 1948
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
1949
	POSTING_READ(intel_dp->output_reg);
2342 Serge 1950
	msleep(intel_dp->panel_power_down_delay);
2330 Serge 1951
}
1952
 
1953
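/*
 * Read the receiver capability block starting at DPCD address 0x000.
 * A DPCD revision of 0 means no sink is responding; downstream port
 * descriptors are fetched only for branch devices whose DPCD is newer
 * than 1.0 and therefore carries per-port information.
 */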
static bool
1954
intel_dp_get_dpcd(struct intel_dp *intel_dp)
1955
{
1956
	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
3031 serge 1957
					   sizeof(intel_dp->dpcd)) == 0)
1958
		return false; /* aux transfer failed */
1959
 
1960
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
1961
		return false; /* DPCD not present */
1962
 
1963
	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
1964
	      DP_DWN_STRM_PORT_PRESENT))
1965
		return true; /* native DP sink */
1966
 
1967
	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
1968
		return true; /* no per-port downstream info */
1969
 
1970
	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
1971
					   intel_dp->downstream_ports,
1972
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
1973
		return false; /* downstream port status fetch failed */
1974
 
2330 Serge 1975
	return true;
3031 serge 1976
}
2330 Serge 1977
 
3031 serge 1978
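/*
 * Purely informational: if the sink advertises OUI support, log the
 * IEEE OUI of the sink and branch devices.  VDD is forced on so that an
 * eDP panel's AUX channel is able to answer.
 */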
static void
1979
intel_dp_probe_oui(struct intel_dp *intel_dp)
1980
{
1981
	u8 buf[3];
1982
 
1983
	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
1984
		return;
1985
 
1986
	ironlake_edp_panel_vdd_on(intel_dp);
1987
 
1988
	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
1989
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
1990
			      buf[0], buf[1], buf[2]);
1991
 
1992
	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
1993
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
1994
			      buf[0], buf[1], buf[2]);
1995
 
1996
	ironlake_edp_panel_vdd_off(intel_dp, false);
2330 Serge 1997
}
1998
 
2342 Serge 1999
static bool
2000
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2001
{
2002
	int ret;
2003
 
2004
	ret = intel_dp_aux_native_read_retry(intel_dp,
2005
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
2006
					     sink_irq_vector, 1);
2007
	if (!ret)
2008
		return false;
2009
 
2010
	return true;
2011
}
2012
 
2013
static void
2014
intel_dp_handle_test_request(struct intel_dp *intel_dp)
2015
{
2016
	/* NAK by default */
2017
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
2018
}
2019
 
2330 Serge 2020
/*
2021
 * According to DP spec
2022
 * 5.1.2:
2023
 *  1. Read DPCD
2024
 *  2. Configure link according to Receiver Capabilities
2025
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
2026
 *  4. Check link status on receipt of hot-plug interrupt
2027
 */
2028
 
2029
static void
2030
intel_dp_check_link_status(struct intel_dp *intel_dp)
2031
{
2342 Serge 2032
	u8 sink_irq_vector;
2033
	u8 link_status[DP_LINK_STATUS_SIZE];
2034
 
3031 serge 2035
	if (!intel_dp->base.connectors_active)
2330 Serge 2036
		return;
2037
 
3031 serge 2038
	if (WARN_ON(!intel_dp->base.base.crtc))
2330 Serge 2039
		return;
2040
 
2041
	/* Try to read receiver status if the link appears to be up */
2342 Serge 2042
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
2330 Serge 2043
		intel_dp_link_down(intel_dp);
2044
		return;
2045
	}
2046
 
2047
	/* Now read the DPCD to see if it's actually running */
2048
	if (!intel_dp_get_dpcd(intel_dp)) {
2049
		intel_dp_link_down(intel_dp);
2050
		return;
2051
	}
2052
 
2342 Serge 2053
	/* Try to read the source of the interrupt */
2054
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2055
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
2056
		/* Clear interrupt source */
2057
		intel_dp_aux_native_write_1(intel_dp,
2058
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
2059
					    sink_irq_vector);
2060
 
2061
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
2062
			intel_dp_handle_test_request(intel_dp);
2063
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
2064
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
2065
	}
2066
 
2067
	if (!intel_channel_eq_ok(intel_dp, link_status)) {
2330 Serge 2068
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
2069
			      drm_get_encoder_name(&intel_dp->base.base));
2070
		intel_dp_start_link_train(intel_dp);
2071
		intel_dp_complete_link_train(intel_dp);
2072
	}
2073
}
2074
 
3031 serge 2075
/* XXX this is probably wrong for multiple downstream ports */
2330 Serge 2076
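/*
 * Branch devices are probed in decreasing order of reliability:
 * SINK_COUNT when the downstream port supports HPD, then a plain DDC
 * probe, and finally "unknown" for VGA/NON_EDID ports that cannot be
 * probed either way.
 */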
static enum drm_connector_status
2077
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2078
{
3031 serge 2079
	uint8_t *dpcd = intel_dp->dpcd;
2080
	bool hpd;
2081
	uint8_t type;
2082
 
2083
	if (!intel_dp_get_dpcd(intel_dp))
2084
		return connector_status_disconnected;
2085
 
2086
	/* if there's no downstream port, we're done */
2087
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2330 Serge 2088
		return connector_status_connected;
3031 serge 2089
 
2090
	/* If we're HPD-aware, SINK_COUNT changes dynamically */
2091
	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2092
	if (hpd) {
2093
		uint8_t reg;
2094
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2095
						    &reg, 1))
2096
			return connector_status_unknown;
2097
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2098
					      : connector_status_disconnected;
2099
	}
2100
 
2101
	/* If no HPD, poke DDC gently */
2102
	if (drm_probe_ddc(&intel_dp->adapter))
2103
		return connector_status_connected;
2104
 
2105
	/* Well we tried, say unknown for unreliable port types */
2106
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2107
	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2108
		return connector_status_unknown;
2109
 
2110
	/* Anything else is out of spec, warn and ignore */
2111
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2330 Serge 2112
	return connector_status_disconnected;
2113
}
2114
 
2115
static enum drm_connector_status
2116
ironlake_dp_detect(struct intel_dp *intel_dp)
2117
{
2118
	enum drm_connector_status status;
2119
 
2120
	/* Can't disconnect eDP, but you can close the lid... */
2121
	if (is_edp(intel_dp)) {
2122
		status = intel_panel_detect(intel_dp->base.base.dev);
2123
		if (status == connector_status_unknown)
2124
			status = connector_status_connected;
2125
		return status;
2126
	}
2127
 
2128
	return intel_dp_detect_dpcd(intel_dp);
2129
}
2130
 
2131
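/*
 * On G4x the live connection state is reported directly in
 * PORT_HOTPLUG_STAT; only when the per-port live status bit is set is
 * the sink actually queried over AUX.
 */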
static enum drm_connector_status
2132
g4x_dp_detect(struct intel_dp *intel_dp)
2133
{
2134
	struct drm_device *dev = intel_dp->base.base.dev;
2135
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 2136
	uint32_t bit;
2330 Serge 2137
 
2138
	switch (intel_dp->output_reg) {
2139
	case DP_B:
3031 serge 2140
		bit = DPB_HOTPLUG_LIVE_STATUS;
2330 Serge 2141
		break;
2142
	case DP_C:
3031 serge 2143
		bit = DPC_HOTPLUG_LIVE_STATUS;
2330 Serge 2144
		break;
2145
	case DP_D:
3031 serge 2146
		bit = DPD_HOTPLUG_LIVE_STATUS;
2330 Serge 2147
		break;
2148
	default:
2149
		return connector_status_unknown;
2150
	}
2151
 
3031 serge 2152
	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2330 Serge 2153
		return connector_status_disconnected;
2154
 
2155
	return intel_dp_detect_dpcd(intel_dp);
2156
}
2157
 
2342 Serge 2158
static struct edid *
2159
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2160
{
2161
	struct intel_dp *intel_dp = intel_attached_dp(connector);
2162
	struct edid	*edid;
3031 serge 2163
	int size;
2342 Serge 2164
 
3031 serge 2165
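	/*
	 * An eDP panel may not answer DDC once its power is off, so hand
	 * back a copy of the EDID cached while VDD was forced on during
	 * init instead of re-probing the panel.
	 */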
	if (is_edp(intel_dp)) {
2166
		if (!intel_dp->edid)
2167
			return NULL;
2168
 
2169
		size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
2170
		edid = kmalloc(size, GFP_KERNEL);
2171
		if (!edid)
2172
			return NULL;
2173
 
2174
		memcpy(edid, intel_dp->edid, size);
2175
		return edid;
2176
	}
2177
 
2342 Serge 2178
	edid = drm_get_edid(connector, adapter);
2179
	return edid;
2180
}
2181
 
2182
static int
2183
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2184
{
2185
	struct intel_dp *intel_dp = intel_attached_dp(connector);
2186
	int	ret;
2187
 
3031 serge 2188
	if (is_edp(intel_dp)) {
2189
		drm_mode_connector_update_edid_property(connector,
2190
							intel_dp->edid);
2191
		ret = drm_add_edid_modes(connector, intel_dp->edid);
2192
		drm_edid_to_eld(connector,
2193
				intel_dp->edid);
2194
		return intel_dp->edid_mode_count;
2195
	}
2196
 
2342 Serge 2197
	ret = intel_ddc_get_modes(connector, adapter);
2198
	return ret;
2199
}
2200
 
2201
 
2330 Serge 2202
/**
2203
 * intel_dp_detect - probe for a sink on the given DP connector.
2204
 *
2205
 * \return connector_status_connected if a sink is detected.
2206
 * \return connector_status_disconnected if no sink is present.
2207
 */
2208
static enum drm_connector_status
2209
intel_dp_detect(struct drm_connector *connector, bool force)
2210
{
2211
	struct intel_dp *intel_dp = intel_attached_dp(connector);
2212
	struct drm_device *dev = intel_dp->base.base.dev;
2213
	enum drm_connector_status status;
2214
	struct edid *edid = NULL;
2215
 
2216
	intel_dp->has_audio = false;
2217
 
2218
	if (HAS_PCH_SPLIT(dev))
2219
		status = ironlake_dp_detect(intel_dp);
2220
	else
2221
		status = g4x_dp_detect(intel_dp);
2222
 
2223
	DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
2224
		      intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
2225
		      intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
2226
		      intel_dp->dpcd[6], intel_dp->dpcd[7]);
2227
 
2228
	if (status != connector_status_connected)
2229
		return status;
3031 serge 2230
 
2231
	intel_dp_probe_oui(intel_dp);
2232
 
2330 Serge 2233
/*
2234
	if (intel_dp->force_audio) {
2235
		intel_dp->has_audio = intel_dp->force_audio > 0;
2236
	} else {
3031 serge 2237
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2330 Serge 2238
		if (edid) {
2239
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
2240
			kfree(edid);
2241
		}
2242
	}
2243
*/
2244
	return connector_status_connected;
2245
}
2246
 
2247
static int intel_dp_get_modes(struct drm_connector *connector)
2248
{
2249
	struct intel_dp *intel_dp = intel_attached_dp(connector);
2250
	struct drm_device *dev = intel_dp->base.base.dev;
2251
	struct drm_i915_private *dev_priv = dev->dev_private;
2252
	int ret;
2253
 
2254
	/* We should parse the EDID data and find out if it has an audio sink
2255
	 */
2256
 
2342 Serge 2257
	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
2330 Serge 2258
	if (ret) {
2342 Serge 2259
		if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
2330 Serge 2260
			struct drm_display_mode *newmode;
2261
			list_for_each_entry(newmode, &connector->probed_modes,
2262
					    head) {
2342 Serge 2263
				if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
2264
					intel_dp->panel_fixed_mode =
2330 Serge 2265
						drm_mode_duplicate(dev, newmode);
2266
					break;
2267
				}
2268
			}
2269
		}
2270
		return ret;
2271
	}
2272
 
2273
	/* if eDP has no EDID, try to use fixed panel mode from VBT */
2274
	if (is_edp(intel_dp)) {
2342 Serge 2275
		/* initialize panel mode from VBT if available for eDP */
2276
		if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
2277
			intel_dp->panel_fixed_mode =
2278
				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
2279
			if (intel_dp->panel_fixed_mode) {
2280
				intel_dp->panel_fixed_mode->type |=
2281
					DRM_MODE_TYPE_PREFERRED;
2282
			}
2283
		}
2284
		if (intel_dp->panel_fixed_mode) {
2330 Serge 2285
			struct drm_display_mode *mode;
2342 Serge 2286
			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
2330 Serge 2287
			drm_mode_probed_add(connector, mode);
2288
			return 1;
2289
		}
2290
	}
2291
	return 0;
2292
}
2293
 
2294
 
2295
 
2296
 
2297
 
2298
static int
2299
intel_dp_set_property(struct drm_connector *connector,
2300
		      struct drm_property *property,
2301
		      uint64_t val)
2302
{
2303
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
2304
	struct intel_dp *intel_dp = intel_attached_dp(connector);
2305
	int ret;
2306
 
2307
	ret = drm_connector_property_set_value(connector, property, val);
2308
	if (ret)
2309
		return ret;
2310
#if 0
2311
	if (property == dev_priv->force_audio_property) {
2312
		int i = val;
2313
		bool has_audio;
2314
 
2315
		if (i == intel_dp->force_audio)
2316
			return 0;
2317
 
2318
		intel_dp->force_audio = i;
2319
 
3031 serge 2320
		if (i == HDMI_AUDIO_AUTO)
2330 Serge 2321
			has_audio = intel_dp_detect_audio(connector);
2322
		else
3031 serge 2323
			has_audio = (i == HDMI_AUDIO_ON);
2330 Serge 2324
 
2325
		if (has_audio == intel_dp->has_audio)
2326
			return 0;
2327
 
2328
		intel_dp->has_audio = has_audio;
2329
		goto done;
2330
	}
2331
 
2332
	if (property == dev_priv->broadcast_rgb_property) {
2333
		if (val == !!intel_dp->color_range)
2334
			return 0;
2335
 
2336
		intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
2337
		goto done;
2338
	}
2339
#endif
2340
 
2341
	return -EINVAL;
2342
 
2343
done:
2344
	if (intel_dp->base.base.crtc) {
2345
		struct drm_crtc *crtc = intel_dp->base.base.crtc;
3031 serge 2346
		intel_set_mode(crtc, &crtc->mode,
2347
			       crtc->x, crtc->y, crtc->fb);
2330 Serge 2348
	}
2349
 
2350
	return 0;
2351
}
2352
 
2353
static void
2342 Serge 2354
intel_dp_destroy(struct drm_connector *connector)
2330 Serge 2355
{
2356
	struct drm_device *dev = connector->dev;
3031 serge 2357
	struct intel_dp *intel_dp = intel_attached_dp(connector);
2330 Serge 2358
 
3031 serge 2359
	if (is_edp(intel_dp))
2330 Serge 2360
		intel_panel_destroy_backlight(dev);
2361
 
2362
	drm_sysfs_connector_remove(connector);
2363
	drm_connector_cleanup(connector);
2364
	kfree(connector);
2365
}
2366
 
2367
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2368
{
2369
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2370
 
2371
//   i2c_del_adapter(&intel_dp->adapter);
2372
	drm_encoder_cleanup(encoder);
2342 Serge 2373
	if (is_edp(intel_dp)) {
2374
//		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
2375
		ironlake_panel_vdd_off_sync(intel_dp);
2376
	}
2330 Serge 2377
	kfree(intel_dp);
2378
}
2379
 
2380
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
2381
	.mode_fixup = intel_dp_mode_fixup,
2382
	.mode_set = intel_dp_mode_set,
3031 serge 2383
	.disable = intel_encoder_noop,
2330 Serge 2384
};
2385
 
2386
static const struct drm_connector_funcs intel_dp_connector_funcs = {
3031 serge 2387
	.dpms = intel_connector_dpms,
2330 Serge 2388
	.detect = intel_dp_detect,
2389
	.fill_modes = drm_helper_probe_single_connector_modes,
2390
	.set_property = intel_dp_set_property,
2391
	.destroy = intel_dp_destroy,
2392
};
2393
 
2394
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
2395
	.get_modes = intel_dp_get_modes,
2396
	.mode_valid = intel_dp_mode_valid,
2397
	.best_encoder = intel_best_encoder,
2398
};
2399
 
2400
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
2401
	.destroy = intel_dp_encoder_destroy,
2402
};
2403
 
2404
static void
2405
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
2406
{
2407
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
2408
 
2409
	intel_dp_check_link_status(intel_dp);
2410
}
2411
 
2327 Serge 2412
/* Return which DP Port should be selected for Transcoder DP control */
2413
int
2342 Serge 2414
intel_trans_dp_port_sel(struct drm_crtc *crtc)
2327 Serge 2415
{
2416
	struct drm_device *dev = crtc->dev;
3031 serge 2417
	struct intel_encoder *encoder;
2327 Serge 2418
 
3031 serge 2419
	for_each_encoder_on_crtc(dev, crtc, encoder) {
2420
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2327 Serge 2421
 
2342 Serge 2422
		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
2423
		    intel_dp->base.type == INTEL_OUTPUT_EDP)
2327 Serge 2424
			return intel_dp->output_reg;
2425
	}
2426
 
2427
	return -1;
2428
}
2330 Serge 2429
 
2430
/* check the VBT to see whether the eDP is on DP-D port */
2431
bool intel_dpd_is_edp(struct drm_device *dev)
2432
{
2433
	struct drm_i915_private *dev_priv = dev->dev_private;
2434
	struct child_device_config *p_child;
2435
	int i;
2436
 
2437
	if (!dev_priv->child_dev_num)
2438
		return false;
2439
 
2440
	for (i = 0; i < dev_priv->child_dev_num; i++) {
2441
		p_child = dev_priv->child_dev + i;
2442
 
2443
		if (p_child->dvo_port == PORT_IDPD &&
2444
		    p_child->device_type == DEVICE_TYPE_eDP)
2445
			return true;
2446
	}
2447
	return false;
2448
}
2449
 
2450
static void
2451
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
2452
{
2453
	intel_attach_force_audio_property(connector);
2454
	intel_attach_broadcast_rgb_property(connector);
2455
}
2456
 
2457
void
3031 serge 2458
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2330 Serge 2459
{
2460
	struct drm_i915_private *dev_priv = dev->dev_private;
2461
	struct drm_connector *connector;
2462
	struct intel_dp *intel_dp;
2463
	struct intel_encoder *intel_encoder;
2464
	struct intel_connector *intel_connector;
2465
	const char *name = NULL;
2466
	int type;
2467
 
2468
	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
2469
	if (!intel_dp)
2470
		return;
2471
 
2472
	intel_dp->output_reg = output_reg;
3031 serge 2473
	intel_dp->port = port;
2474
	/* Preserve the current hw state. */
2475
	intel_dp->DP = I915_READ(intel_dp->output_reg);
2330 Serge 2476
 
2477
	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
2478
	if (!intel_connector) {
2479
		kfree(intel_dp);
2480
		return;
2481
	}
2482
	intel_encoder = &intel_dp->base;
2483
 
2484
	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
2485
		if (intel_dpd_is_edp(dev))
2486
			intel_dp->is_pch_edp = true;
2487
 
2488
	if (output_reg == DP_A || is_pch_edp(intel_dp)) {
2489
		type = DRM_MODE_CONNECTOR_eDP;
2490
		intel_encoder->type = INTEL_OUTPUT_EDP;
2491
	} else {
2492
		type = DRM_MODE_CONNECTOR_DisplayPort;
2493
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2494
	}
2495
 
2496
	connector = &intel_connector->base;
2497
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
2498
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
2499
 
2500
	connector->polled = DRM_CONNECTOR_POLL_HPD;
2501
 
3031 serge 2502
	intel_encoder->cloneable = false;
2330 Serge 2503
 
3031 serge 2504
//	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
2505
//			  ironlake_panel_vdd_work);
2330 Serge 2506
 
2342 Serge 2507
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3031 serge 2508
 
2330 Serge 2509
	connector->interlace_allowed = true;
2510
	connector->doublescan_allowed = 0;
2511
 
2512
	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
2513
			 DRM_MODE_ENCODER_TMDS);
2514
	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
2515
 
2516
	intel_connector_attach_encoder(intel_connector, intel_encoder);
2517
	drm_sysfs_connector_add(connector);
2518
 
3031 serge 2519
	intel_encoder->enable = intel_enable_dp;
2520
	intel_encoder->pre_enable = intel_pre_enable_dp;
2521
	intel_encoder->disable = intel_disable_dp;
2522
	intel_encoder->post_disable = intel_post_disable_dp;
2523
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
2524
	intel_connector->get_hw_state = intel_connector_get_hw_state;
2525
 
2330 Serge 2526
	/* Set up the DDC bus. */
3031 serge 2527
	switch (port) {
2528
	case PORT_A:
2330 Serge 2529
			name = "DPDDC-A";
2530
			break;
3031 serge 2531
	case PORT_B:
2532
		dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
2330 Serge 2533
			name = "DPDDC-B";
2534
			break;
3031 serge 2535
	case PORT_C:
2536
		dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
2330 Serge 2537
			name = "DPDDC-C";
2538
			break;
3031 serge 2539
	case PORT_D:
2540
		dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
2330 Serge 2541
			name = "DPDDC-D";
2542
			break;
3031 serge 2543
	default:
2544
		WARN(1, "Invalid port %c\n", port_name(port));
2545
		break;
2330 Serge 2546
	}
2547
 
2548
	/* Cache some DPCD data in the eDP case */
2549
	if (is_edp(intel_dp)) {
2342 Serge 2550
		struct edp_power_seq	cur, vbt;
2551
		u32 pp_on, pp_off, pp_div;
2330 Serge 2552
 
2553
		pp_on = I915_READ(PCH_PP_ON_DELAYS);
2342 Serge 2554
		pp_off = I915_READ(PCH_PP_OFF_DELAYS);
2330 Serge 2555
		pp_div = I915_READ(PCH_PP_DIVISOR);
2556
 
3031 serge 2557
		if (!pp_on || !pp_off || !pp_div) {
2558
			DRM_INFO("bad panel power sequencing delays, disabling panel\n");
2559
			intel_dp_encoder_destroy(&intel_dp->base.base);
2560
			intel_dp_destroy(&intel_connector->base);
2561
			return;
2562
		}
2563
 
2342 Serge 2564
		/* Pull timing values out of registers */
2565
		cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
2566
			PANEL_POWER_UP_DELAY_SHIFT;
2330 Serge 2567
 
2342 Serge 2568
		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2569
			PANEL_LIGHT_ON_DELAY_SHIFT;
2570
 
2571
		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2572
			PANEL_LIGHT_OFF_DELAY_SHIFT;
2573
 
2574
		cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
2575
			PANEL_POWER_DOWN_DELAY_SHIFT;
2576
 
2577
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
2578
			       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
2579
 
2580
		DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2581
			      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
2582
 
2583
		vbt = dev_priv->edp.pps;
2584
 
2585
		DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2586
			      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
2587
 
2588
#define get_delay(field)	((max(cur.field, vbt.field) + 9) / 10)
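		/*
		 * The sequencer fields above are in 100us units (t11_t12 was
		 * scaled to match), so taking the larger of the register and
		 * VBT values and computing (x + 9) / 10 gives milliseconds,
		 * rounded up: e.g. a raw t8 of 205 (20.5ms) becomes a 21ms
		 * backlight_on_delay.
		 */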
2589
 
2590
		intel_dp->panel_power_up_delay = get_delay(t1_t3);
2591
		intel_dp->backlight_on_delay = get_delay(t8);
2592
		intel_dp->backlight_off_delay = get_delay(t9);
2593
		intel_dp->panel_power_down_delay = get_delay(t10);
2594
		intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
2595
 
2596
		DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2597
			      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2598
			      intel_dp->panel_power_cycle_delay);
2599
 
2600
		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2601
			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
3031 serge 2602
	}
2342 Serge 2603
 
3031 serge 2604
	intel_dp_i2c_init(intel_dp, intel_connector, name);
2605
 
2606
	if (is_edp(intel_dp)) {
2607
		bool ret;
2608
		struct edid *edid;
2609
 
2330 Serge 2610
		ironlake_edp_panel_vdd_on(intel_dp);
2611
		ret = intel_dp_get_dpcd(intel_dp);
2342 Serge 2612
		ironlake_edp_panel_vdd_off(intel_dp, false);
2613
 
2330 Serge 2614
		if (ret) {
2615
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
2616
				dev_priv->no_aux_handshake =
2617
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
2618
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
2619
		} else {
2620
			/* if this fails, presume the device is a ghost */
2621
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
2622
			intel_dp_encoder_destroy(&intel_dp->base.base);
2623
			intel_dp_destroy(&intel_connector->base);
2624
			return;
2625
		}
3031 serge 2626
 
2627
		ironlake_edp_panel_vdd_on(intel_dp);
2628
		edid = drm_get_edid(connector, &intel_dp->adapter);
2629
		if (edid) {
2630
			drm_mode_connector_update_edid_property(connector,
2631
								edid);
2632
			intel_dp->edid_mode_count =
2633
				drm_add_edid_modes(connector, edid);
2634
			drm_edid_to_eld(connector, edid);
2635
			intel_dp->edid = edid;
2636
		}
2637
		ironlake_edp_panel_vdd_off(intel_dp, false);
2330 Serge 2638
	}
2639
 
2640
	intel_encoder->hot_plug = intel_dp_hot_plug;
2641
 
2642
	if (is_edp(intel_dp)) {
2643
		dev_priv->int_edp_connector = connector;
2644
		intel_panel_setup_backlight(dev);
2645
	}
2646
 
2647
	intel_dp_add_properties(intel_dp, connector);
2648
 
2649
	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
2650
	 * 0xd.  Failure to do so will result in spurious interrupts being
2651
	 * generated on the port when a cable is not attached.
2652
	 */
2653
	if (IS_G4X(dev) && !IS_GM45(dev)) {
2654
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
2655
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
2656
	}
2657
}