Subversion Repositories Kolibri OS

Rev

Rev 5097 | Rev 5354 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 5097 Rev 5139
1
/*
1
/*
2
 * Copyright © 2008 Intel Corporation
2
 * Copyright © 2008 Intel Corporation
3
 *
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
10
 *
11
 * The above copyright notice and this permission notice (including the next
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
13
 * Software.
14
 *
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
21
 * IN THE SOFTWARE.
22
 *
22
 *
23
 * Authors:
23
 * Authors:
24
 *    Keith Packard 
24
 *    Keith Packard 
25
 *
25
 *
26
 */
26
 */
27
 
27
 
28
#include 
28
#include 
29
#include 
29
#include 
30
#include 
30
#include 
31
#include 
31
#include 
32
#include 
32
#include 
33
#include 
33
#include 
34
#include 
34
#include 
35
#include "intel_drv.h"
35
#include "intel_drv.h"
36
#include 
36
#include 
37
#include "i915_drv.h"
37
#include "i915_drv.h"
38
 
38
 
39
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
39
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
40
 
40
 
41
struct dp_link_dpll {
41
struct dp_link_dpll {
42
	int link_bw;
42
	int link_bw;
43
	struct dpll dpll;
43
	struct dpll dpll;
44
};
44
};
45
 
45
 
46
static const struct dp_link_dpll gen4_dpll[] = {
46
static const struct dp_link_dpll gen4_dpll[] = {
47
	{ DP_LINK_BW_1_62,
47
	{ DP_LINK_BW_1_62,
48
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
48
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
49
	{ DP_LINK_BW_2_7,
49
	{ DP_LINK_BW_2_7,
50
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
50
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
51
};
51
};
52
 
52
 
53
static const struct dp_link_dpll pch_dpll[] = {
53
static const struct dp_link_dpll pch_dpll[] = {
54
	{ DP_LINK_BW_1_62,
54
	{ DP_LINK_BW_1_62,
55
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
55
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
56
	{ DP_LINK_BW_2_7,
56
	{ DP_LINK_BW_2_7,
57
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
57
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
58
};
58
};
59
 
59
 
60
static const struct dp_link_dpll vlv_dpll[] = {
60
static const struct dp_link_dpll vlv_dpll[] = {
61
	{ DP_LINK_BW_1_62,
61
	{ DP_LINK_BW_1_62,
62
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
62
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
63
	{ DP_LINK_BW_2_7,
63
	{ DP_LINK_BW_2_7,
64
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
64
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
65
};
65
};
66
 
66
 
67
/*
67
/*
68
 * CHV supports eDP 1.4 that have  more link rates.
68
 * CHV supports eDP 1.4 that have  more link rates.
69
 * Below only provides the fixed rate but exclude variable rate.
69
 * Below only provides the fixed rate but exclude variable rate.
70
 */
70
 */
71
static const struct dp_link_dpll chv_dpll[] = {
71
static const struct dp_link_dpll chv_dpll[] = {
72
	/*
72
	/*
73
	 * CHV requires to program fractional division for m2.
73
	 * CHV requires to program fractional division for m2.
74
	 * m2 is stored in fixed point format using formula below
74
	 * m2 is stored in fixed point format using formula below
75
	 * (m2_int << 22) | m2_fraction
75
	 * (m2_int << 22) | m2_fraction
76
	 */
76
	 */
77
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
77
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
78
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
78
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
79
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
79
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
80
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
80
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
81
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
81
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
82
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
82
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
83
};
83
};
84
 
84
 
85
/**
85
/**
86
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
86
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
87
 * @intel_dp: DP struct
87
 * @intel_dp: DP struct
88
 *
88
 *
89
 * If a CPU or PCH DP output is attached to an eDP panel, this function
89
 * If a CPU or PCH DP output is attached to an eDP panel, this function
90
 * will return true, and false otherwise.
90
 * will return true, and false otherwise.
91
 */
91
 */
92
static bool is_edp(struct intel_dp *intel_dp)
92
static bool is_edp(struct intel_dp *intel_dp)
93
{
93
{
94
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
94
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
95
 
95
 
96
	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
96
	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
97
}
97
}
98
 
98
 
99
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
99
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
100
{
100
{
101
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
101
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
102
 
102
 
103
	return intel_dig_port->base.base.dev;
103
	return intel_dig_port->base.base.dev;
104
}
104
}
105
 
105
 
106
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
106
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
107
{
107
{
108
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
108
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
109
}
109
}
110
 
110
 
111
static void intel_dp_link_down(struct intel_dp *intel_dp);
111
static void intel_dp_link_down(struct intel_dp *intel_dp);
112
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
112
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
113
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
113
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
114
 
114
 
115
int
115
int
116
intel_dp_max_link_bw(struct intel_dp *intel_dp)
116
intel_dp_max_link_bw(struct intel_dp *intel_dp)
117
{
117
{
118
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
118
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
119
	struct drm_device *dev = intel_dp->attached_connector->base.dev;
119
	struct drm_device *dev = intel_dp->attached_connector->base.dev;
120
 
120
 
121
	switch (max_link_bw) {
121
	switch (max_link_bw) {
122
	case DP_LINK_BW_1_62:
122
	case DP_LINK_BW_1_62:
123
	case DP_LINK_BW_2_7:
123
	case DP_LINK_BW_2_7:
124
		break;
124
		break;
125
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
125
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
126
		if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
126
		if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
127
		     INTEL_INFO(dev)->gen >= 8) &&
127
		     INTEL_INFO(dev)->gen >= 8) &&
128
		    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
128
		    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
129
			max_link_bw = DP_LINK_BW_5_4;
129
			max_link_bw = DP_LINK_BW_5_4;
130
		else
130
		else
131
			max_link_bw = DP_LINK_BW_2_7;
131
			max_link_bw = DP_LINK_BW_2_7;
132
		break;
132
		break;
133
	default:
133
	default:
134
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
134
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
135
		     max_link_bw);
135
		     max_link_bw);
136
		max_link_bw = DP_LINK_BW_1_62;
136
		max_link_bw = DP_LINK_BW_1_62;
137
		break;
137
		break;
138
	}
138
	}
139
	return max_link_bw;
139
	return max_link_bw;
140
}
140
}
141
 
141
 
142
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
142
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
143
{
143
{
144
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
144
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
145
	struct drm_device *dev = intel_dig_port->base.base.dev;
145
	struct drm_device *dev = intel_dig_port->base.base.dev;
146
	u8 source_max, sink_max;
146
	u8 source_max, sink_max;
147
 
147
 
148
	source_max = 4;
148
	source_max = 4;
149
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
149
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
150
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
150
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
151
		source_max = 2;
151
		source_max = 2;
152
 
152
 
153
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
153
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
154
 
154
 
155
	return min(source_max, sink_max);
155
	return min(source_max, sink_max);
156
}
156
}
157
 
157
 
158
/*
158
/*
159
 * The units on the numbers in the next two are... bizarre.  Examples will
159
 * The units on the numbers in the next two are... bizarre.  Examples will
160
 * make it clearer; this one parallels an example in the eDP spec.
160
 * make it clearer; this one parallels an example in the eDP spec.
161
 *
161
 *
162
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
162
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
163
 *
163
 *
164
 *     270000 * 1 * 8 / 10 == 216000
164
 *     270000 * 1 * 8 / 10 == 216000
165
 *
165
 *
166
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
166
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
167
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
167
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
168
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
168
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
169
 * 119000.  At 18bpp that's 2142000 kilobits per second.
169
 * 119000.  At 18bpp that's 2142000 kilobits per second.
170
 *
170
 *
171
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
171
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
172
 * get the result in decakilobits instead of kilobits.
172
 * get the result in decakilobits instead of kilobits.
173
 */
173
 */
174
 
174
 
175
/*
 * Bandwidth needed by @pixel_clock (kHz) at @bpp, in decakilobits/s.
 * The "+ 9" makes the division by 10 round up, so we never under-report
 * the required link bandwidth.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}
180
 
180
 
181
/*
 * Usable payload bandwidth of a link, in decakilobits/s: link symbol
 * clock (kHz) times lanes, scaled by 8/10 for the 8b/10b line encoding.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
186
 
186
 
187
static enum drm_mode_status
187
static enum drm_mode_status
188
intel_dp_mode_valid(struct drm_connector *connector,
188
intel_dp_mode_valid(struct drm_connector *connector,
189
		    struct drm_display_mode *mode)
189
		    struct drm_display_mode *mode)
190
{
190
{
191
	struct intel_dp *intel_dp = intel_attached_dp(connector);
191
	struct intel_dp *intel_dp = intel_attached_dp(connector);
192
	struct intel_connector *intel_connector = to_intel_connector(connector);
192
	struct intel_connector *intel_connector = to_intel_connector(connector);
193
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
193
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
194
	int target_clock = mode->clock;
194
	int target_clock = mode->clock;
195
	int max_rate, mode_rate, max_lanes, max_link_clock;
195
	int max_rate, mode_rate, max_lanes, max_link_clock;
196
 
196
 
197
	if (is_edp(intel_dp) && fixed_mode) {
197
	if (is_edp(intel_dp) && fixed_mode) {
198
		if (mode->hdisplay > fixed_mode->hdisplay)
198
		if (mode->hdisplay > fixed_mode->hdisplay)
199
			return MODE_PANEL;
199
			return MODE_PANEL;
200
 
200
 
201
		if (mode->vdisplay > fixed_mode->vdisplay)
201
		if (mode->vdisplay > fixed_mode->vdisplay)
202
			return MODE_PANEL;
202
			return MODE_PANEL;
203
 
203
 
204
		target_clock = fixed_mode->clock;
204
		target_clock = fixed_mode->clock;
205
	}
205
	}
206
 
206
 
207
	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
207
	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
208
	max_lanes = intel_dp_max_lane_count(intel_dp);
208
	max_lanes = intel_dp_max_lane_count(intel_dp);
209
 
209
 
210
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
210
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
211
	mode_rate = intel_dp_link_required(target_clock, 18);
211
	mode_rate = intel_dp_link_required(target_clock, 18);
212
 
212
 
213
	if (mode_rate > max_rate)
213
	if (mode_rate > max_rate)
214
		return MODE_CLOCK_HIGH;
214
		return MODE_CLOCK_HIGH;
215
 
215
 
216
	if (mode->clock < 10000)
216
	if (mode->clock < 10000)
217
		return MODE_CLOCK_LOW;
217
		return MODE_CLOCK_LOW;
218
 
218
 
219
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
219
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
220
		return MODE_H_ILLEGAL;
220
		return MODE_H_ILLEGAL;
221
 
221
 
222
	return MODE_OK;
222
	return MODE_OK;
223
}
223
}
224
 
224
 
225
/*
 * Pack up to 4 bytes into a big-endian 32-bit word for the AUX data
 * registers: src[0] lands in the most significant byte. Extra source
 * bytes beyond 4 are ignored.
 */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int	i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
237
 
237
 
238
/*
 * Unpack a big-endian 32-bit AUX data word into up to 4 destination
 * bytes; the most significant byte goes to dst[0]. Inverse of pack_aux.
 */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
247
 
247
 
248
/* hrawclock is 1/4 the FSB frequency */
248
/* hrawclock is 1/4 the FSB frequency */
249
static int
249
static int
250
intel_hrawclk(struct drm_device *dev)
250
intel_hrawclk(struct drm_device *dev)
251
{
251
{
252
	struct drm_i915_private *dev_priv = dev->dev_private;
252
	struct drm_i915_private *dev_priv = dev->dev_private;
253
	uint32_t clkcfg;
253
	uint32_t clkcfg;
254
 
254
 
255
	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
255
	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
256
	if (IS_VALLEYVIEW(dev))
256
	if (IS_VALLEYVIEW(dev))
257
		return 200;
257
		return 200;
258
 
258
 
259
	clkcfg = I915_READ(CLKCFG);
259
	clkcfg = I915_READ(CLKCFG);
260
	switch (clkcfg & CLKCFG_FSB_MASK) {
260
	switch (clkcfg & CLKCFG_FSB_MASK) {
261
	case CLKCFG_FSB_400:
261
	case CLKCFG_FSB_400:
262
		return 100;
262
		return 100;
263
	case CLKCFG_FSB_533:
263
	case CLKCFG_FSB_533:
264
		return 133;
264
		return 133;
265
	case CLKCFG_FSB_667:
265
	case CLKCFG_FSB_667:
266
		return 166;
266
		return 166;
267
	case CLKCFG_FSB_800:
267
	case CLKCFG_FSB_800:
268
		return 200;
268
		return 200;
269
	case CLKCFG_FSB_1067:
269
	case CLKCFG_FSB_1067:
270
		return 266;
270
		return 266;
271
	case CLKCFG_FSB_1333:
271
	case CLKCFG_FSB_1333:
272
		return 333;
272
		return 333;
273
	/* these two are just a guess; one of them might be right */
273
	/* these two are just a guess; one of them might be right */
274
	case CLKCFG_FSB_1600:
274
	case CLKCFG_FSB_1600:
275
	case CLKCFG_FSB_1600_ALT:
275
	case CLKCFG_FSB_1600_ALT:
276
		return 400;
276
		return 400;
277
	default:
277
	default:
278
		return 133;
278
		return 133;
279
	}
279
	}
280
}
280
}
281
 
281
 
282
static void
282
static void
283
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
283
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
284
				    struct intel_dp *intel_dp,
284
				    struct intel_dp *intel_dp,
285
				    struct edp_power_seq *out);
285
				    struct edp_power_seq *out);
286
static void
286
static void
287
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
287
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
288
					      struct intel_dp *intel_dp,
288
					      struct intel_dp *intel_dp,
289
					      struct edp_power_seq *out);
289
					      struct edp_power_seq *out);
290
 
290
 
291
static enum pipe
291
static enum pipe
292
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
292
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
293
{
293
{
294
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
294
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
295
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
295
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
296
	struct drm_device *dev = intel_dig_port->base.base.dev;
296
	struct drm_device *dev = intel_dig_port->base.base.dev;
297
	struct drm_i915_private *dev_priv = dev->dev_private;
297
	struct drm_i915_private *dev_priv = dev->dev_private;
298
	enum port port = intel_dig_port->port;
298
	enum port port = intel_dig_port->port;
299
	enum pipe pipe;
299
	enum pipe pipe;
300
 
300
 
301
	/* modeset should have pipe */
301
	/* modeset should have pipe */
302
	if (crtc)
302
	if (crtc)
303
		return to_intel_crtc(crtc)->pipe;
303
		return to_intel_crtc(crtc)->pipe;
304
 
304
 
305
	/* init time, try to find a pipe with this port selected */
305
	/* init time, try to find a pipe with this port selected */
306
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
306
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
307
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
307
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
308
			PANEL_PORT_SELECT_MASK;
308
			PANEL_PORT_SELECT_MASK;
309
		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
309
		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
310
			return pipe;
310
			return pipe;
311
		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
311
		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
312
			return pipe;
312
			return pipe;
313
	}
313
	}
314
 
314
 
315
	/* shrug */
315
	/* shrug */
316
	return PIPE_A;
316
	return PIPE_A;
317
}
317
}
318
 
318
 
319
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
319
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
320
{
320
{
321
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
321
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
322
 
322
 
323
	if (HAS_PCH_SPLIT(dev))
323
	if (HAS_PCH_SPLIT(dev))
324
		return PCH_PP_CONTROL;
324
		return PCH_PP_CONTROL;
325
	else
325
	else
326
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
326
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
327
}
327
}
328
 
328
 
329
static u32 _pp_stat_reg(struct intel_dp *intel_dp)
329
static u32 _pp_stat_reg(struct intel_dp *intel_dp)
330
{
330
{
331
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
331
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
332
 
332
 
333
	if (HAS_PCH_SPLIT(dev))
333
	if (HAS_PCH_SPLIT(dev))
334
		return PCH_PP_STATUS;
334
		return PCH_PP_STATUS;
335
	else
335
	else
336
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
336
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
337
}
337
}
338
 
338
 
339
 
339
 
340
static bool edp_have_panel_power(struct intel_dp *intel_dp)
340
static bool edp_have_panel_power(struct intel_dp *intel_dp)
341
{
341
{
342
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
342
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
343
	struct drm_i915_private *dev_priv = dev->dev_private;
343
	struct drm_i915_private *dev_priv = dev->dev_private;
344
 
344
 
345
	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
345
	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
346
}
346
}
347
 
347
 
348
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
348
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
349
{
349
{
350
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
350
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
351
	struct drm_i915_private *dev_priv = dev->dev_private;
351
	struct drm_i915_private *dev_priv = dev->dev_private;
352
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
352
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
353
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
353
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
354
	enum intel_display_power_domain power_domain;
354
	enum intel_display_power_domain power_domain;
355
 
355
 
356
	power_domain = intel_display_port_power_domain(intel_encoder);
356
	power_domain = intel_display_port_power_domain(intel_encoder);
357
	return intel_display_power_enabled(dev_priv, power_domain) &&
357
	return intel_display_power_enabled(dev_priv, power_domain) &&
358
	       (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
358
	       (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
359
}
359
}
360
 
360
 
361
static void
361
static void
362
intel_dp_check_edp(struct intel_dp *intel_dp)
362
intel_dp_check_edp(struct intel_dp *intel_dp)
363
{
363
{
364
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
364
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
365
	struct drm_i915_private *dev_priv = dev->dev_private;
365
	struct drm_i915_private *dev_priv = dev->dev_private;
366
 
366
 
367
	if (!is_edp(intel_dp))
367
	if (!is_edp(intel_dp))
368
		return;
368
		return;
369
 
369
 
370
	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
370
	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
371
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
371
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
372
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
372
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
373
			      I915_READ(_pp_stat_reg(intel_dp)),
373
			      I915_READ(_pp_stat_reg(intel_dp)),
374
			      I915_READ(_pp_ctrl_reg(intel_dp)));
374
			      I915_READ(_pp_ctrl_reg(intel_dp)));
375
	}
375
	}
376
}
376
}
377
 
377
 
378
static uint32_t
378
static uint32_t
379
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
379
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
380
{
380
{
381
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
381
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
382
	struct drm_device *dev = intel_dig_port->base.base.dev;
382
	struct drm_device *dev = intel_dig_port->base.base.dev;
383
	struct drm_i915_private *dev_priv = dev->dev_private;
383
	struct drm_i915_private *dev_priv = dev->dev_private;
384
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
384
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
385
	uint32_t status;
385
	uint32_t status;
386
	bool done;
386
	bool done;
387
 
387
 
388
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
388
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
389
	if (has_aux_irq)
389
	if (has_aux_irq)
390
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
390
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
391
					  msecs_to_jiffies_timeout(10));
391
					  msecs_to_jiffies_timeout(10));
392
	else
392
	else
393
		done = wait_for_atomic(C, 10) == 0;
393
		done = wait_for_atomic(C, 10) == 0;
394
	if (!done)
394
	if (!done)
395
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
395
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
396
			  has_aux_irq);
396
			  has_aux_irq);
397
#undef C
397
#undef C
398
 
398
 
399
	return status;
399
	return status;
400
}
400
}
401
 
401
 
402
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
402
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
403
{
403
{
404
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
404
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
405
	struct drm_device *dev = intel_dig_port->base.base.dev;
405
	struct drm_device *dev = intel_dig_port->base.base.dev;
406
 
406
 
407
	/*
407
	/*
408
	 * The clock divider is based off the hrawclk, and would like to run at
408
	 * The clock divider is based off the hrawclk, and would like to run at
409
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
409
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
410
	 */
410
	 */
411
	return index ? 0 : intel_hrawclk(dev) / 2;
411
	return index ? 0 : intel_hrawclk(dev) / 2;
412
}
412
}
413
 
413
 
414
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
414
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
415
{
415
{
416
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
416
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
417
	struct drm_device *dev = intel_dig_port->base.base.dev;
417
	struct drm_device *dev = intel_dig_port->base.base.dev;
418
 
418
 
419
	if (index)
419
	if (index)
420
		return 0;
420
		return 0;
421
 
421
 
422
	if (intel_dig_port->port == PORT_A) {
422
	if (intel_dig_port->port == PORT_A) {
423
		if (IS_GEN6(dev) || IS_GEN7(dev))
423
		if (IS_GEN6(dev) || IS_GEN7(dev))
424
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
424
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
425
		else
425
		else
426
			return 225; /* eDP input clock at 450Mhz */
426
			return 225; /* eDP input clock at 450Mhz */
427
	} else {
427
	} else {
428
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
428
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
429
	}
429
	}
430
}
430
}
431
 
431
 
432
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
432
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
433
{
433
{
434
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
434
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
435
	struct drm_device *dev = intel_dig_port->base.base.dev;
435
	struct drm_device *dev = intel_dig_port->base.base.dev;
436
	struct drm_i915_private *dev_priv = dev->dev_private;
436
	struct drm_i915_private *dev_priv = dev->dev_private;
437
 
437
 
438
	if (intel_dig_port->port == PORT_A) {
438
	if (intel_dig_port->port == PORT_A) {
439
		if (index)
439
		if (index)
440
			return 0;
440
			return 0;
441
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
441
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
442
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
442
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
443
		/* Workaround for non-ULT HSW */
443
		/* Workaround for non-ULT HSW */
444
		switch (index) {
444
		switch (index) {
445
		case 0: return 63;
445
		case 0: return 63;
446
		case 1: return 72;
446
		case 1: return 72;
447
		default: return 0;
447
		default: return 0;
448
		}
448
		}
449
	} else  {
449
	} else  {
450
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
450
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
451
	}
451
	}
452
}
452
}
453
 
453
 
454
/* VLV uses a single fixed AUX clock divider of 100. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}
458
 
458
 
459
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
459
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
460
				      bool has_aux_irq,
460
				      bool has_aux_irq,
461
				      int send_bytes,
461
				      int send_bytes,
462
				      uint32_t aux_clock_divider)
462
				      uint32_t aux_clock_divider)
463
{
463
{
464
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
464
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
465
	struct drm_device *dev = intel_dig_port->base.base.dev;
465
	struct drm_device *dev = intel_dig_port->base.base.dev;
466
	uint32_t precharge, timeout;
466
	uint32_t precharge, timeout;
467
 
467
 
468
	if (IS_GEN6(dev))
468
	if (IS_GEN6(dev))
469
		precharge = 3;
469
		precharge = 3;
470
	else
470
	else
471
		precharge = 5;
471
		precharge = 5;
472
 
472
 
473
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
473
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
474
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
474
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
475
	else
475
	else
476
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
476
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
477
 
477
 
478
	return DP_AUX_CH_CTL_SEND_BUSY |
478
	return DP_AUX_CH_CTL_SEND_BUSY |
479
	       DP_AUX_CH_CTL_DONE |
479
	       DP_AUX_CH_CTL_DONE |
480
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
480
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
481
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
481
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
482
	       timeout |
482
	       timeout |
483
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
483
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
484
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
484
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
485
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
485
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
486
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
486
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
487
}
487
}
488
 
488
 
489
static int
489
static int
490
intel_dp_aux_ch(struct intel_dp *intel_dp,
490
intel_dp_aux_ch(struct intel_dp *intel_dp,
491
		uint8_t *send, int send_bytes,
491
		uint8_t *send, int send_bytes,
492
		uint8_t *recv, int recv_size)
492
		uint8_t *recv, int recv_size)
493
{
493
{
494
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
494
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
495
	struct drm_device *dev = intel_dig_port->base.base.dev;
495
	struct drm_device *dev = intel_dig_port->base.base.dev;
496
	struct drm_i915_private *dev_priv = dev->dev_private;
496
	struct drm_i915_private *dev_priv = dev->dev_private;
497
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
497
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
498
	uint32_t ch_data = ch_ctl + 4;
498
	uint32_t ch_data = ch_ctl + 4;
499
	uint32_t aux_clock_divider;
499
	uint32_t aux_clock_divider;
500
	int i, ret, recv_bytes;
500
	int i, ret, recv_bytes;
501
	uint32_t status;
501
	uint32_t status;
502
	int try, clock = 0;
502
	int try, clock = 0;
503
	bool has_aux_irq = HAS_AUX_IRQ(dev);
503
	bool has_aux_irq = HAS_AUX_IRQ(dev);
504
	bool vdd;
504
	bool vdd;
505
 
505
 
506
	vdd = _edp_panel_vdd_on(intel_dp);
506
	vdd = _edp_panel_vdd_on(intel_dp);
507
 
507
 
508
	/* dp aux is extremely sensitive to irq latency, hence request the
508
	/* dp aux is extremely sensitive to irq latency, hence request the
509
	 * lowest possible wakeup latency and so prevent the cpu from going into
509
	 * lowest possible wakeup latency and so prevent the cpu from going into
510
	 * deep sleep states.
510
	 * deep sleep states.
511
	 */
511
	 */
512
 
512
 
513
	intel_dp_check_edp(intel_dp);
513
	intel_dp_check_edp(intel_dp);
514
 
514
 
515
	intel_aux_display_runtime_get(dev_priv);
515
	intel_aux_display_runtime_get(dev_priv);
516
 
516
 
517
	/* Try to wait for any previous AUX channel activity */
517
	/* Try to wait for any previous AUX channel activity */
518
	for (try = 0; try < 3; try++) {
518
	for (try = 0; try < 3; try++) {
519
		status = I915_READ_NOTRACE(ch_ctl);
519
		status = I915_READ_NOTRACE(ch_ctl);
520
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
520
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
521
			break;
521
			break;
522
		msleep(1);
522
		msleep(1);
523
	}
523
	}
524
 
524
 
525
	if (try == 3) {
525
	if (try == 3) {
526
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
526
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
527
		     I915_READ(ch_ctl));
527
		     I915_READ(ch_ctl));
528
		ret = -EBUSY;
528
		ret = -EBUSY;
529
		goto out;
529
		goto out;
530
	}
530
	}
531
 
531
 
532
	/* Only 5 data registers! */
532
	/* Only 5 data registers! */
533
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
533
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
534
		ret = -E2BIG;
534
		ret = -E2BIG;
535
		goto out;
535
		goto out;
536
	}
536
	}
537
 
537
 
538
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
538
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
539
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
539
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
540
							  has_aux_irq,
540
							  has_aux_irq,
541
							  send_bytes,
541
							  send_bytes,
542
							  aux_clock_divider);
542
							  aux_clock_divider);
543
 
543
 
544
	/* Must try at least 3 times according to DP spec */
544
	/* Must try at least 3 times according to DP spec */
545
	for (try = 0; try < 5; try++) {
545
	for (try = 0; try < 5; try++) {
546
		/* Load the send data into the aux channel data registers */
546
		/* Load the send data into the aux channel data registers */
547
		for (i = 0; i < send_bytes; i += 4)
547
		for (i = 0; i < send_bytes; i += 4)
548
			I915_WRITE(ch_data + i,
548
			I915_WRITE(ch_data + i,
549
				   pack_aux(send + i, send_bytes - i));
549
				   pack_aux(send + i, send_bytes - i));
550
 
550
 
551
		/* Send the command and wait for it to complete */
551
		/* Send the command and wait for it to complete */
552
			I915_WRITE(ch_ctl, send_ctl);
552
			I915_WRITE(ch_ctl, send_ctl);
553
 
553
 
554
		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
554
		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
555
 
555
 
556
		/* Clear done status and any errors */
556
		/* Clear done status and any errors */
557
		I915_WRITE(ch_ctl,
557
		I915_WRITE(ch_ctl,
558
			   status |
558
			   status |
559
			   DP_AUX_CH_CTL_DONE |
559
			   DP_AUX_CH_CTL_DONE |
560
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
560
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
561
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
561
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
562
 
562
 
563
		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
563
		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
564
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
564
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
565
			continue;
565
			continue;
566
		if (status & DP_AUX_CH_CTL_DONE)
566
		if (status & DP_AUX_CH_CTL_DONE)
567
			break;
567
			break;
568
	}
568
	}
569
		if (status & DP_AUX_CH_CTL_DONE)
569
		if (status & DP_AUX_CH_CTL_DONE)
570
			break;
570
			break;
571
	}
571
	}
572
 
572
 
573
	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
573
	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
574
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
574
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
575
		ret = -EBUSY;
575
		ret = -EBUSY;
576
		goto out;
576
		goto out;
577
	}
577
	}
578
 
578
 
579
	/* Check for timeout or receive error.
579
	/* Check for timeout or receive error.
580
	 * Timeouts occur when the sink is not connected
580
	 * Timeouts occur when the sink is not connected
581
	 */
581
	 */
582
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
582
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
583
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
583
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
584
		ret = -EIO;
584
		ret = -EIO;
585
		goto out;
585
		goto out;
586
	}
586
	}
587
 
587
 
588
	/* Timeouts occur when the device isn't connected, so they're
588
	/* Timeouts occur when the device isn't connected, so they're
589
	 * "normal" -- don't fill the kernel log with these */
589
	 * "normal" -- don't fill the kernel log with these */
590
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
590
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
591
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
591
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
592
		ret = -ETIMEDOUT;
592
		ret = -ETIMEDOUT;
593
		goto out;
593
		goto out;
594
	}
594
	}
595
 
595
 
596
	/* Unload any bytes sent back from the other side */
596
	/* Unload any bytes sent back from the other side */
597
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
597
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
598
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
598
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
599
	if (recv_bytes > recv_size)
599
	if (recv_bytes > recv_size)
600
		recv_bytes = recv_size;
600
		recv_bytes = recv_size;
601
 
601
 
602
	for (i = 0; i < recv_bytes; i += 4)
602
	for (i = 0; i < recv_bytes; i += 4)
603
		unpack_aux(I915_READ(ch_data + i),
603
		unpack_aux(I915_READ(ch_data + i),
604
			   recv + i, recv_bytes - i);
604
			   recv + i, recv_bytes - i);
605
 
605
 
606
	ret = recv_bytes;
606
	ret = recv_bytes;
607
out:
607
out:
608
//	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
608
//	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
609
	intel_aux_display_runtime_put(dev_priv);
609
	intel_aux_display_runtime_put(dev_priv);
610
 
610
 
611
	if (vdd)
611
	if (vdd)
612
		edp_panel_vdd_off(intel_dp, false);
612
		edp_panel_vdd_off(intel_dp, false);
613
 
613
 
614
	return ret;
614
	return ret;
615
}
615
}
616
 
616
 
617
#define BARE_ADDRESS_SIZE	3
617
#define BARE_ADDRESS_SIZE	3
618
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
618
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
619
static ssize_t
619
static ssize_t
620
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
620
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
621
{
621
{
622
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
622
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
623
	uint8_t txbuf[20], rxbuf[20];
623
	uint8_t txbuf[20], rxbuf[20];
624
	size_t txsize, rxsize;
624
	size_t txsize, rxsize;
625
	int ret;
625
	int ret;
626
 
626
 
627
	txbuf[0] = msg->request << 4;
627
	txbuf[0] = msg->request << 4;
628
	txbuf[1] = msg->address >> 8;
628
	txbuf[1] = msg->address >> 8;
629
	txbuf[2] = msg->address & 0xff;
629
	txbuf[2] = msg->address & 0xff;
630
	txbuf[3] = msg->size - 1;
630
	txbuf[3] = msg->size - 1;
631
 
631
 
632
	switch (msg->request & ~DP_AUX_I2C_MOT) {
632
	switch (msg->request & ~DP_AUX_I2C_MOT) {
633
	case DP_AUX_NATIVE_WRITE:
633
	case DP_AUX_NATIVE_WRITE:
634
	case DP_AUX_I2C_WRITE:
634
	case DP_AUX_I2C_WRITE:
635
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
635
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
636
		rxsize = 1;
636
		rxsize = 1;
637
 
637
 
638
		if (WARN_ON(txsize > 20))
638
		if (WARN_ON(txsize > 20))
639
		return -E2BIG;
639
		return -E2BIG;
640
 
640
 
641
		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
641
		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
642
 
642
 
643
		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
643
		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
644
		if (ret > 0) {
644
		if (ret > 0) {
645
			msg->reply = rxbuf[0] >> 4;
645
			msg->reply = rxbuf[0] >> 4;
646
 
646
 
647
			/* Return payload size. */
647
			/* Return payload size. */
648
			ret = msg->size;
648
			ret = msg->size;
649
		}
649
		}
650
			break;
650
			break;
651
 
651
 
652
	case DP_AUX_NATIVE_READ:
652
	case DP_AUX_NATIVE_READ:
653
	case DP_AUX_I2C_READ:
653
	case DP_AUX_I2C_READ:
654
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
654
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
655
		rxsize = msg->size + 1;
655
		rxsize = msg->size + 1;
656
 
656
 
657
		if (WARN_ON(rxsize > 20))
657
		if (WARN_ON(rxsize > 20))
658
		return -E2BIG;
658
		return -E2BIG;
659
 
659
 
660
		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
660
		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
661
		if (ret > 0) {
661
		if (ret > 0) {
662
			msg->reply = rxbuf[0] >> 4;
662
			msg->reply = rxbuf[0] >> 4;
663
			/*
663
			/*
664
			 * Assume happy day, and copy the data. The caller is
664
			 * Assume happy day, and copy the data. The caller is
665
			 * expected to check msg->reply before touching it.
665
			 * expected to check msg->reply before touching it.
666
			 *
666
			 *
667
			 * Return payload size.
667
			 * Return payload size.
668
			 */
668
			 */
669
			ret--;
669
			ret--;
670
			memcpy(msg->buffer, rxbuf + 1, ret);
670
			memcpy(msg->buffer, rxbuf + 1, ret);
671
		}
671
		}
672
		break;
672
		break;
673
 
673
 
674
	default:
674
	default:
675
		ret = -EINVAL;
675
		ret = -EINVAL;
676
		break;
676
		break;
677
	}
677
	}
678
 
678
 
679
			return ret;
679
			return ret;
680
}
680
}
681
 
681
 
682
static void
682
static void
683
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
683
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
684
{
684
{
685
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
685
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
686
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
687
	enum port port = intel_dig_port->port;
687
	enum port port = intel_dig_port->port;
688
	const char *name = NULL;
688
	const char *name = NULL;
689
	int ret;
689
	int ret;
690
 
690
 
691
	switch (port) {
691
	switch (port) {
692
	case PORT_A:
692
	case PORT_A:
693
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
693
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
694
		name = "DPDDC-A";
694
		name = "DPDDC-A";
695
		break;
695
		break;
696
	case PORT_B:
696
	case PORT_B:
697
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
697
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
698
		name = "DPDDC-B";
698
		name = "DPDDC-B";
699
		break;
699
		break;
700
	case PORT_C:
700
	case PORT_C:
701
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
701
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
702
		name = "DPDDC-C";
702
		name = "DPDDC-C";
703
		break;
703
		break;
704
	case PORT_D:
704
	case PORT_D:
705
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
705
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
706
		name = "DPDDC-D";
706
		name = "DPDDC-D";
707
		break;
707
		break;
708
	default:
708
	default:
709
		BUG();
709
		BUG();
710
	}
710
	}
711
 
711
 
712
	if (!HAS_DDI(dev))
712
	if (!HAS_DDI(dev))
713
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
713
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
714
 
714
 
715
	intel_dp->aux.name = name;
715
	intel_dp->aux.name = name;
716
	intel_dp->aux.dev = dev->dev;
716
	intel_dp->aux.dev = dev->dev;
717
	intel_dp->aux.transfer = intel_dp_aux_transfer;
717
	intel_dp->aux.transfer = intel_dp_aux_transfer;
718
 
718
 
719
 
719
 
720
	ret = drm_dp_aux_register(&intel_dp->aux);
720
	ret = drm_dp_aux_register(&intel_dp->aux);
721
		if (ret < 0) {
721
		if (ret < 0) {
722
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
722
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
723
			  name, ret);
723
			  name, ret);
724
		return;
724
		return;
725
	}
725
	}
726
}
726
}
727
 
727
 
728
static void
728
static void
729
intel_dp_connector_unregister(struct intel_connector *intel_connector)
729
intel_dp_connector_unregister(struct intel_connector *intel_connector)
730
{
730
{
731
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
731
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
732
 
732
 
733
	intel_connector_unregister(intel_connector);
733
	intel_connector_unregister(intel_connector);
734
}
734
}
735
 
735
 
736
static void
736
static void
737
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
737
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
738
{
738
{
739
	switch (link_bw) {
739
	switch (link_bw) {
740
	case DP_LINK_BW_1_62:
740
	case DP_LINK_BW_1_62:
741
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
741
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
742
		break;
742
		break;
743
	case DP_LINK_BW_2_7:
743
	case DP_LINK_BW_2_7:
744
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
744
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
745
		break;
745
		break;
746
	case DP_LINK_BW_5_4:
746
	case DP_LINK_BW_5_4:
747
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
747
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
748
		break;
748
		break;
749
	}
749
	}
750
}
750
}
751
 
751
 
752
static void
752
static void
753
intel_dp_set_clock(struct intel_encoder *encoder,
753
intel_dp_set_clock(struct intel_encoder *encoder,
754
		   struct intel_crtc_config *pipe_config, int link_bw)
754
		   struct intel_crtc_config *pipe_config, int link_bw)
755
{
755
{
756
	struct drm_device *dev = encoder->base.dev;
756
	struct drm_device *dev = encoder->base.dev;
757
	const struct dp_link_dpll *divisor = NULL;
757
	const struct dp_link_dpll *divisor = NULL;
758
	int i, count = 0;
758
	int i, count = 0;
759
 
759
 
760
	if (IS_G4X(dev)) {
760
	if (IS_G4X(dev)) {
761
		divisor = gen4_dpll;
761
		divisor = gen4_dpll;
762
		count = ARRAY_SIZE(gen4_dpll);
762
		count = ARRAY_SIZE(gen4_dpll);
763
	} else if (HAS_PCH_SPLIT(dev)) {
763
	} else if (HAS_PCH_SPLIT(dev)) {
764
		divisor = pch_dpll;
764
		divisor = pch_dpll;
765
		count = ARRAY_SIZE(pch_dpll);
765
		count = ARRAY_SIZE(pch_dpll);
766
	} else if (IS_CHERRYVIEW(dev)) {
766
	} else if (IS_CHERRYVIEW(dev)) {
767
		divisor = chv_dpll;
767
		divisor = chv_dpll;
768
		count = ARRAY_SIZE(chv_dpll);
768
		count = ARRAY_SIZE(chv_dpll);
769
	} else if (IS_VALLEYVIEW(dev)) {
769
	} else if (IS_VALLEYVIEW(dev)) {
770
		divisor = vlv_dpll;
770
		divisor = vlv_dpll;
771
		count = ARRAY_SIZE(vlv_dpll);
771
		count = ARRAY_SIZE(vlv_dpll);
772
	}
772
	}
773
 
773
 
774
	if (divisor && count) {
774
	if (divisor && count) {
775
		for (i = 0; i < count; i++) {
775
		for (i = 0; i < count; i++) {
776
			if (link_bw == divisor[i].link_bw) {
776
			if (link_bw == divisor[i].link_bw) {
777
				pipe_config->dpll = divisor[i].dpll;
777
				pipe_config->dpll = divisor[i].dpll;
778
				pipe_config->clock_set = true;
778
				pipe_config->clock_set = true;
779
				break;
779
				break;
780
			}
780
			}
781
		}
781
		}
782
	}
782
	}
783
}
783
}
784
 
784
 
785
static void
785
static void
786
intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
786
intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
787
{
787
{
788
	struct drm_device *dev = crtc->base.dev;
788
	struct drm_device *dev = crtc->base.dev;
789
	struct drm_i915_private *dev_priv = dev->dev_private;
789
	struct drm_i915_private *dev_priv = dev->dev_private;
790
	enum transcoder transcoder = crtc->config.cpu_transcoder;
790
	enum transcoder transcoder = crtc->config.cpu_transcoder;
791
 
791
 
792
	I915_WRITE(PIPE_DATA_M2(transcoder),
792
	I915_WRITE(PIPE_DATA_M2(transcoder),
793
		TU_SIZE(m_n->tu) | m_n->gmch_m);
793
		TU_SIZE(m_n->tu) | m_n->gmch_m);
794
	I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
794
	I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
795
	I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
795
	I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
796
	I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
796
	I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
797
}
797
}
798
 
798
 
799
bool
799
bool
800
intel_dp_compute_config(struct intel_encoder *encoder,
800
intel_dp_compute_config(struct intel_encoder *encoder,
801
			struct intel_crtc_config *pipe_config)
801
			struct intel_crtc_config *pipe_config)
802
{
802
{
803
	struct drm_device *dev = encoder->base.dev;
803
	struct drm_device *dev = encoder->base.dev;
804
	struct drm_i915_private *dev_priv = dev->dev_private;
804
	struct drm_i915_private *dev_priv = dev->dev_private;
805
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
805
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
806
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
806
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
807
	enum port port = dp_to_dig_port(intel_dp)->port;
807
	enum port port = dp_to_dig_port(intel_dp)->port;
808
	struct intel_crtc *intel_crtc = encoder->new_crtc;
808
	struct intel_crtc *intel_crtc = encoder->new_crtc;
809
	struct intel_connector *intel_connector = intel_dp->attached_connector;
809
	struct intel_connector *intel_connector = intel_dp->attached_connector;
810
	int lane_count, clock;
810
	int lane_count, clock;
811
	int min_lane_count = 1;
811
	int min_lane_count = 1;
812
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
812
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
813
	/* Conveniently, the link BW constants become indices with a shift...*/
813
	/* Conveniently, the link BW constants become indices with a shift...*/
814
	int min_clock = 0;
814
	int min_clock = 0;
815
	int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
815
	int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
816
	int bpp, mode_rate;
816
	int bpp, mode_rate;
817
	static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
817
	static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
818
	int link_avail, link_clock;
818
	int link_avail, link_clock;
819
 
819
 
820
	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
820
	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
821
		pipe_config->has_pch_encoder = true;
821
		pipe_config->has_pch_encoder = true;
822
 
822
 
823
	pipe_config->has_dp_encoder = true;
823
	pipe_config->has_dp_encoder = true;
824
	pipe_config->has_audio = intel_dp->has_audio;
824
	pipe_config->has_audio = intel_dp->has_audio;
825
 
825
 
826
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
826
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
827
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
827
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
828
				       adjusted_mode);
828
				       adjusted_mode);
829
		if (!HAS_PCH_SPLIT(dev))
829
		if (!HAS_PCH_SPLIT(dev))
830
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
830
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
831
						 intel_connector->panel.fitting_mode);
831
						 intel_connector->panel.fitting_mode);
832
		else
832
		else
833
			intel_pch_panel_fitting(intel_crtc, pipe_config,
833
			intel_pch_panel_fitting(intel_crtc, pipe_config,
834
						intel_connector->panel.fitting_mode);
834
						intel_connector->panel.fitting_mode);
835
	}
835
	}
836
 
836
 
837
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
837
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
838
		return false;
838
		return false;
839
 
839
 
840
	DRM_DEBUG_KMS("DP link computation with max lane count %i "
840
	DRM_DEBUG_KMS("DP link computation with max lane count %i "
841
		      "max bw %02x pixel clock %iKHz\n",
841
		      "max bw %02x pixel clock %iKHz\n",
842
		      max_lane_count, bws[max_clock],
842
		      max_lane_count, bws[max_clock],
843
		      adjusted_mode->crtc_clock);
843
		      adjusted_mode->crtc_clock);
844
 
844
 
845
	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
845
	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
846
	 * bpc in between. */
846
	 * bpc in between. */
847
	bpp = pipe_config->pipe_bpp;
847
	bpp = pipe_config->pipe_bpp;
848
	if (is_edp(intel_dp)) {
848
	if (is_edp(intel_dp)) {
849
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
849
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
850
		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
850
		DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
851
			      dev_priv->vbt.edp_bpp);
851
			      dev_priv->vbt.edp_bpp);
852
		bpp = dev_priv->vbt.edp_bpp;
852
		bpp = dev_priv->vbt.edp_bpp;
853
	}
853
	}
854
 
854
 
855
		if (IS_BROADWELL(dev)) {
855
		if (IS_BROADWELL(dev)) {
856
			/* Yes, it's an ugly hack. */
856
			/* Yes, it's an ugly hack. */
857
			min_lane_count = max_lane_count;
857
			min_lane_count = max_lane_count;
858
			DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
858
			DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
859
				      min_lane_count);
859
				      min_lane_count);
860
		} else if (dev_priv->vbt.edp_lanes) {
860
		} else if (dev_priv->vbt.edp_lanes) {
861
			min_lane_count = min(dev_priv->vbt.edp_lanes,
861
			min_lane_count = min(dev_priv->vbt.edp_lanes,
862
					     max_lane_count);
862
					     max_lane_count);
863
			DRM_DEBUG_KMS("using min %u lanes per VBT\n",
863
			DRM_DEBUG_KMS("using min %u lanes per VBT\n",
864
				      min_lane_count);
864
				      min_lane_count);
865
		}
865
		}
866
 
866
 
867
		if (dev_priv->vbt.edp_rate) {
867
		if (dev_priv->vbt.edp_rate) {
868
			min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
868
			min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
869
			DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
869
			DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
870
				      bws[min_clock]);
870
				      bws[min_clock]);
871
		}
871
		}
872
	}
872
	}
873
 
873
 
874
	for (; bpp >= 6*3; bpp -= 2*3) {
874
	for (; bpp >= 6*3; bpp -= 2*3) {
875
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
875
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
876
						   bpp);
876
						   bpp);
877
 
877
 
878
		for (clock = min_clock; clock <= max_clock; clock++) {
878
		for (clock = min_clock; clock <= max_clock; clock++) {
879
		for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
879
		for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
880
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
880
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
881
				link_avail = intel_dp_max_data_rate(link_clock,
881
				link_avail = intel_dp_max_data_rate(link_clock,
882
								    lane_count);
882
								    lane_count);
883
 
883
 
884
				if (mode_rate <= link_avail) {
884
				if (mode_rate <= link_avail) {
885
					goto found;
885
					goto found;
886
				}
886
				}
887
			}
887
			}
888
		}
888
		}
889
	}
889
	}
890
 
890
 
891
		return false;
891
		return false;
892
 
892
 
893
found:
893
found:
894
	if (intel_dp->color_range_auto) {
894
	if (intel_dp->color_range_auto) {
895
		/*
895
		/*
896
		 * See:
896
		 * See:
897
		 * CEA-861-E - 5.1 Default Encoding Parameters
897
		 * CEA-861-E - 5.1 Default Encoding Parameters
898
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
898
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
899
		 */
899
		 */
900
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
900
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
901
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
901
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
902
		else
902
		else
903
			intel_dp->color_range = 0;
903
			intel_dp->color_range = 0;
904
	}
904
	}
905
 
905
 
906
	if (intel_dp->color_range)
906
	if (intel_dp->color_range)
907
		pipe_config->limited_color_range = true;
907
		pipe_config->limited_color_range = true;
908
 
908
 
909
				intel_dp->link_bw = bws[clock];
909
				intel_dp->link_bw = bws[clock];
910
				intel_dp->lane_count = lane_count;
910
				intel_dp->lane_count = lane_count;
911
	pipe_config->pipe_bpp = bpp;
911
	pipe_config->pipe_bpp = bpp;
912
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
912
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
913
 
913
 
914
	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
914
	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
915
				       intel_dp->link_bw, intel_dp->lane_count,
915
				       intel_dp->link_bw, intel_dp->lane_count,
916
		      pipe_config->port_clock, bpp);
916
		      pipe_config->port_clock, bpp);
917
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
917
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
918
					      mode_rate, link_avail);
918
					      mode_rate, link_avail);
919
 
919
 
920
	intel_link_compute_m_n(bpp, lane_count,
920
	intel_link_compute_m_n(bpp, lane_count,
921
			       adjusted_mode->crtc_clock,
921
			       adjusted_mode->crtc_clock,
922
			       pipe_config->port_clock,
922
			       pipe_config->port_clock,
923
			       &pipe_config->dp_m_n);
923
			       &pipe_config->dp_m_n);
924
 
924
 
925
	if (intel_connector->panel.downclock_mode != NULL &&
925
	if (intel_connector->panel.downclock_mode != NULL &&
926
		intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
926
		intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
927
			intel_link_compute_m_n(bpp, lane_count,
927
			intel_link_compute_m_n(bpp, lane_count,
928
				intel_connector->panel.downclock_mode->clock,
928
				intel_connector->panel.downclock_mode->clock,
929
				pipe_config->port_clock,
929
				pipe_config->port_clock,
930
				&pipe_config->dp_m2_n2);
930
				&pipe_config->dp_m2_n2);
931
	}
931
	}
932
 
932
 
933
	if (HAS_DDI(dev))
933
	if (HAS_DDI(dev))
934
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
934
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
935
	else
935
	else
936
	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
936
	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
937
 
937
 
938
	return true;
938
	return true;
939
}
939
}
940
 
940
 
941
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
941
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
942
{
942
{
943
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
943
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
944
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
944
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
945
	struct drm_device *dev = crtc->base.dev;
945
	struct drm_device *dev = crtc->base.dev;
946
	struct drm_i915_private *dev_priv = dev->dev_private;
946
	struct drm_i915_private *dev_priv = dev->dev_private;
947
	u32 dpa_ctl;
947
	u32 dpa_ctl;
948
 
948
 
949
	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
949
	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
950
	dpa_ctl = I915_READ(DP_A);
950
	dpa_ctl = I915_READ(DP_A);
951
	dpa_ctl &= ~DP_PLL_FREQ_MASK;
951
	dpa_ctl &= ~DP_PLL_FREQ_MASK;
952
 
952
 
953
	if (crtc->config.port_clock == 162000) {
953
	if (crtc->config.port_clock == 162000) {
954
		/* For a long time we've carried around a ILK-DevA w/a for the
954
		/* For a long time we've carried around a ILK-DevA w/a for the
955
		 * 160MHz clock. If we're really unlucky, it's still required.
955
		 * 160MHz clock. If we're really unlucky, it's still required.
956
		 */
956
		 */
957
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
957
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
958
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
958
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
959
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
959
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
960
	} else {
960
	} else {
961
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
961
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
962
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
962
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
963
	}
963
	}
964
 
964
 
965
	I915_WRITE(DP_A, dpa_ctl);
965
	I915_WRITE(DP_A, dpa_ctl);
966
 
966
 
967
	POSTING_READ(DP_A);
967
	POSTING_READ(DP_A);
968
	udelay(500);
968
	udelay(500);
969
}
969
}
970
 
970
 
971
static void intel_dp_prepare(struct intel_encoder *encoder)
971
static void intel_dp_prepare(struct intel_encoder *encoder)
972
{
972
{
973
	struct drm_device *dev = encoder->base.dev;
973
	struct drm_device *dev = encoder->base.dev;
974
	struct drm_i915_private *dev_priv = dev->dev_private;
974
	struct drm_i915_private *dev_priv = dev->dev_private;
975
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
975
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
976
	enum port port = dp_to_dig_port(intel_dp)->port;
976
	enum port port = dp_to_dig_port(intel_dp)->port;
977
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
977
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
978
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
978
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
979
 
979
 
980
	/*
980
	/*
981
	 * There are four kinds of DP registers:
981
	 * There are four kinds of DP registers:
982
	 *
982
	 *
983
	 * 	IBX PCH
983
	 * 	IBX PCH
984
	 * 	SNB CPU
984
	 * 	SNB CPU
985
	 *	IVB CPU
985
	 *	IVB CPU
986
	 * 	CPT PCH
986
	 * 	CPT PCH
987
	 *
987
	 *
988
	 * IBX PCH and CPU are the same for almost everything,
988
	 * IBX PCH and CPU are the same for almost everything,
989
	 * except that the CPU DP PLL is configured in this
989
	 * except that the CPU DP PLL is configured in this
990
	 * register
990
	 * register
991
	 *
991
	 *
992
	 * CPT PCH is quite different, having many bits moved
992
	 * CPT PCH is quite different, having many bits moved
993
	 * to the TRANS_DP_CTL register instead. That
993
	 * to the TRANS_DP_CTL register instead. That
994
	 * configuration happens (oddly) in ironlake_pch_enable
994
	 * configuration happens (oddly) in ironlake_pch_enable
995
	 */
995
	 */
996
 
996
 
997
	/* Preserve the BIOS-computed detected bit. This is
997
	/* Preserve the BIOS-computed detected bit. This is
998
	 * supposed to be read-only.
998
	 * supposed to be read-only.
999
	 */
999
	 */
1000
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1000
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1001
 
1001
 
1002
	/* Handle DP bits in common between all three register formats */
1002
	/* Handle DP bits in common between all three register formats */
1003
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1003
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1004
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1004
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1005
 
1005
 
1006
	if (crtc->config.has_audio) {
1006
	if (crtc->config.has_audio) {
1007
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
1007
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
1008
				 pipe_name(crtc->pipe));
1008
				 pipe_name(crtc->pipe));
1009
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1009
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1010
		intel_write_eld(&encoder->base, adjusted_mode);
1010
		intel_write_eld(&encoder->base, adjusted_mode);
1011
	}
1011
	}
1012
 
1012
 
1013
	/* Split out the IBX/CPU vs CPT settings */
1013
	/* Split out the IBX/CPU vs CPT settings */
1014
 
1014
 
1015
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1015
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1016
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1016
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1017
			intel_dp->DP |= DP_SYNC_HS_HIGH;
1017
			intel_dp->DP |= DP_SYNC_HS_HIGH;
1018
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1018
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1019
			intel_dp->DP |= DP_SYNC_VS_HIGH;
1019
			intel_dp->DP |= DP_SYNC_VS_HIGH;
1020
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1020
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1021
 
1021
 
1022
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1022
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1023
			intel_dp->DP |= DP_ENHANCED_FRAMING;
1023
			intel_dp->DP |= DP_ENHANCED_FRAMING;
1024
 
1024
 
1025
		intel_dp->DP |= crtc->pipe << 29;
1025
		intel_dp->DP |= crtc->pipe << 29;
1026
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1026
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1027
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1027
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
1028
		intel_dp->DP |= intel_dp->color_range;
1028
		intel_dp->DP |= intel_dp->color_range;
1029
 
1029
 
1030
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1030
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1031
			intel_dp->DP |= DP_SYNC_HS_HIGH;
1031
			intel_dp->DP |= DP_SYNC_HS_HIGH;
1032
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1032
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1033
			intel_dp->DP |= DP_SYNC_VS_HIGH;
1033
			intel_dp->DP |= DP_SYNC_VS_HIGH;
1034
		intel_dp->DP |= DP_LINK_TRAIN_OFF;
1034
		intel_dp->DP |= DP_LINK_TRAIN_OFF;
1035
 
1035
 
1036
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1036
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1037
		intel_dp->DP |= DP_ENHANCED_FRAMING;
1037
		intel_dp->DP |= DP_ENHANCED_FRAMING;
1038
 
1038
 
1039
		if (!IS_CHERRYVIEW(dev)) {
1039
		if (!IS_CHERRYVIEW(dev)) {
1040
		if (crtc->pipe == 1)
1040
		if (crtc->pipe == 1)
1041
		intel_dp->DP |= DP_PIPEB_SELECT;
1041
		intel_dp->DP |= DP_PIPEB_SELECT;
1042
	} else {
1042
	} else {
1043
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1043
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1044
		}
1044
		}
1045
	} else {
1045
	} else {
1046
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1046
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1047
	}
1047
	}
1048
}
1048
}
1049
 
1049
 
1050
/*
 * Mask/value pairs for polling the panel power sequencer (PP_STATUS)
 * into its "on", "off" and "power-cycle complete" idle states.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
1058
 
1058
 
1059
static void wait_panel_status(struct intel_dp *intel_dp,
1059
static void wait_panel_status(struct intel_dp *intel_dp,
1060
				       u32 mask,
1060
				       u32 mask,
1061
				       u32 value)
1061
				       u32 value)
1062
{
1062
{
1063
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1063
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1064
	struct drm_i915_private *dev_priv = dev->dev_private;
1064
	struct drm_i915_private *dev_priv = dev->dev_private;
1065
	u32 pp_stat_reg, pp_ctrl_reg;
1065
	u32 pp_stat_reg, pp_ctrl_reg;
1066
 
1066
 
1067
	pp_stat_reg = _pp_stat_reg(intel_dp);
1067
	pp_stat_reg = _pp_stat_reg(intel_dp);
1068
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1068
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1069
 
1069
 
1070
	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1070
	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1071
		      mask, value,
1071
		      mask, value,
1072
			I915_READ(pp_stat_reg),
1072
			I915_READ(pp_stat_reg),
1073
			I915_READ(pp_ctrl_reg));
1073
			I915_READ(pp_ctrl_reg));
1074
 
1074
 
1075
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1075
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1076
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1076
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1077
				I915_READ(pp_stat_reg),
1077
				I915_READ(pp_stat_reg),
1078
				I915_READ(pp_ctrl_reg));
1078
				I915_READ(pp_ctrl_reg));
1079
	}
1079
	}
1080
 
1080
 
1081
	DRM_DEBUG_KMS("Wait complete\n");
1081
	DRM_DEBUG_KMS("Wait complete\n");
1082
}
1082
}
1083
 
1083
 
1084
static void wait_panel_on(struct intel_dp *intel_dp)
1084
static void wait_panel_on(struct intel_dp *intel_dp)
1085
{
1085
{
1086
	DRM_DEBUG_KMS("Wait for panel power on\n");
1086
	DRM_DEBUG_KMS("Wait for panel power on\n");
1087
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1087
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1088
}
1088
}
1089
 
1089
 
1090
static void wait_panel_off(struct intel_dp *intel_dp)
1090
static void wait_panel_off(struct intel_dp *intel_dp)
1091
{
1091
{
1092
	DRM_DEBUG_KMS("Wait for panel power off time\n");
1092
	DRM_DEBUG_KMS("Wait for panel power off time\n");
1093
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1093
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1094
}
1094
}
1095
 
1095
 
1096
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1096
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1097
{
1097
{
1098
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1098
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
1099
 
1099
 
1100
	/* When we disable the VDD override bit last we have to do the manual
1100
	/* When we disable the VDD override bit last we have to do the manual
1101
	 * wait. */
1101
	 * wait. */
1102
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1102
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1103
				       intel_dp->panel_power_cycle_delay);
1103
				       intel_dp->panel_power_cycle_delay);
1104
 
1104
 
1105
	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1105
	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1106
}
1106
}
1107
 
1107
 
1108
static void wait_backlight_on(struct intel_dp *intel_dp)
1108
static void wait_backlight_on(struct intel_dp *intel_dp)
1109
{
1109
{
1110
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1110
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1111
				       intel_dp->backlight_on_delay);
1111
				       intel_dp->backlight_on_delay);
1112
}
1112
}
1113
 
1113
 
1114
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1114
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1115
{
1115
{
1116
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1116
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1117
				       intel_dp->backlight_off_delay);
1117
				       intel_dp->backlight_off_delay);
1118
}
1118
}
1119
 
1119
 
1120
/* Read the current pp_control value, unlocking the register if it
1120
/* Read the current pp_control value, unlocking the register if it
1121
 * is locked
1121
 * is locked
1122
 */
1122
 */
1123
 
1123
 
1124
static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1124
static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1125
{
1125
{
1126
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1126
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1127
	struct drm_i915_private *dev_priv = dev->dev_private;
1127
	struct drm_i915_private *dev_priv = dev->dev_private;
1128
	u32 control;
1128
	u32 control;
1129
 
1129
 
1130
	control = I915_READ(_pp_ctrl_reg(intel_dp));
1130
	control = I915_READ(_pp_ctrl_reg(intel_dp));
1131
	control &= ~PANEL_UNLOCK_MASK;
1131
	control &= ~PANEL_UNLOCK_MASK;
1132
	control |= PANEL_UNLOCK_REGS;
1132
	control |= PANEL_UNLOCK_REGS;
1133
	return control;
1133
	return control;
1134
}
1134
}
1135
 
1135
 
1136
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
1136
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
1137
{
1137
{
1138
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1138
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1139
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1139
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1140
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1140
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1141
	struct drm_i915_private *dev_priv = dev->dev_private;
1141
	struct drm_i915_private *dev_priv = dev->dev_private;
1142
	enum intel_display_power_domain power_domain;
1142
	enum intel_display_power_domain power_domain;
1143
	u32 pp;
1143
	u32 pp;
1144
	u32 pp_stat_reg, pp_ctrl_reg;
1144
	u32 pp_stat_reg, pp_ctrl_reg;
1145
	bool need_to_disable = !intel_dp->want_panel_vdd;
1145
	bool need_to_disable = !intel_dp->want_panel_vdd;
1146
 
1146
 
1147
	if (!is_edp(intel_dp))
1147
	if (!is_edp(intel_dp))
1148
		return false;
1148
		return false;
1149
 
1149
 
1150
	intel_dp->want_panel_vdd = true;
1150
	intel_dp->want_panel_vdd = true;
1151
 
1151
 
1152
	if (edp_have_panel_vdd(intel_dp))
1152
	if (edp_have_panel_vdd(intel_dp))
1153
		return need_to_disable;
1153
		return need_to_disable;
1154
 
1154
 
1155
	power_domain = intel_display_port_power_domain(intel_encoder);
1155
	power_domain = intel_display_port_power_domain(intel_encoder);
1156
	intel_display_power_get(dev_priv, power_domain);
1156
	intel_display_power_get(dev_priv, power_domain);
1157
 
1157
 
1158
	DRM_DEBUG_KMS("Turning eDP VDD on\n");
1158
	DRM_DEBUG_KMS("Turning eDP VDD on\n");
1159
 
1159
 
1160
	if (!edp_have_panel_power(intel_dp))
1160
	if (!edp_have_panel_power(intel_dp))
1161
		wait_panel_power_cycle(intel_dp);
1161
		wait_panel_power_cycle(intel_dp);
1162
 
1162
 
1163
	pp = ironlake_get_pp_control(intel_dp);
1163
	pp = ironlake_get_pp_control(intel_dp);
1164
	pp |= EDP_FORCE_VDD;
1164
	pp |= EDP_FORCE_VDD;
1165
 
1165
 
1166
	pp_stat_reg = _pp_stat_reg(intel_dp);
1166
	pp_stat_reg = _pp_stat_reg(intel_dp);
1167
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1167
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1168
 
1168
 
1169
	I915_WRITE(pp_ctrl_reg, pp);
1169
	I915_WRITE(pp_ctrl_reg, pp);
1170
	POSTING_READ(pp_ctrl_reg);
1170
	POSTING_READ(pp_ctrl_reg);
1171
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1171
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1172
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1172
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1173
	/*
1173
	/*
1174
	 * If the panel wasn't on, delay before accessing aux channel
1174
	 * If the panel wasn't on, delay before accessing aux channel
1175
	 */
1175
	 */
1176
	if (!edp_have_panel_power(intel_dp)) {
1176
	if (!edp_have_panel_power(intel_dp)) {
1177
		DRM_DEBUG_KMS("eDP was not running\n");
1177
		DRM_DEBUG_KMS("eDP was not running\n");
1178
		msleep(intel_dp->panel_power_up_delay);
1178
		msleep(intel_dp->panel_power_up_delay);
1179
	}
1179
	}
1180
 
1180
 
1181
	return need_to_disable;
1181
	return need_to_disable;
1182
}
1182
}
1183
 
1183
 
1184
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1184
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1185
{
1185
{
1186
	if (is_edp(intel_dp)) {
1186
	if (is_edp(intel_dp)) {
1187
		bool vdd = _edp_panel_vdd_on(intel_dp);
1187
		bool vdd = _edp_panel_vdd_on(intel_dp);
1188
 
1188
 
1189
		WARN(!vdd, "eDP VDD already requested on\n");
1189
		WARN(!vdd, "eDP VDD already requested on\n");
1190
	}
1190
	}
1191
}
1191
}
1192
 
1192
 
1193
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1193
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1194
{
1194
{
1195
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1195
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1196
	struct drm_i915_private *dev_priv = dev->dev_private;
1196
	struct drm_i915_private *dev_priv = dev->dev_private;
1197
	u32 pp;
1197
	u32 pp;
1198
	u32 pp_stat_reg, pp_ctrl_reg;
1198
	u32 pp_stat_reg, pp_ctrl_reg;
1199
 
1199
 
1200
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
1200
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
1201
 
1201
 
1202
	if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
1202
	if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
1203
		struct intel_digital_port *intel_dig_port =
1203
		struct intel_digital_port *intel_dig_port =
1204
						dp_to_dig_port(intel_dp);
1204
						dp_to_dig_port(intel_dp);
1205
		struct intel_encoder *intel_encoder = &intel_dig_port->base;
1205
		struct intel_encoder *intel_encoder = &intel_dig_port->base;
1206
		enum intel_display_power_domain power_domain;
1206
		enum intel_display_power_domain power_domain;
1207
 
1207
 
1208
		DRM_DEBUG_KMS("Turning eDP VDD off\n");
1208
		DRM_DEBUG_KMS("Turning eDP VDD off\n");
1209
 
1209
 
1210
		pp = ironlake_get_pp_control(intel_dp);
1210
		pp = ironlake_get_pp_control(intel_dp);
1211
	pp &= ~EDP_FORCE_VDD;
1211
	pp &= ~EDP_FORCE_VDD;
1212
 
1212
 
1213
		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1213
		pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1214
		pp_stat_reg = _pp_stat_reg(intel_dp);
1214
		pp_stat_reg = _pp_stat_reg(intel_dp);
1215
 
1215
 
1216
		I915_WRITE(pp_ctrl_reg, pp);
1216
		I915_WRITE(pp_ctrl_reg, pp);
1217
		POSTING_READ(pp_ctrl_reg);
1217
		POSTING_READ(pp_ctrl_reg);
1218
 
1218
 
1219
	/* Make sure sequencer is idle before allowing subsequent activity */
1219
	/* Make sure sequencer is idle before allowing subsequent activity */
1220
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1220
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1221
		I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1221
		I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1222
 
1222
 
1223
		if ((pp & POWER_TARGET_ON) == 0)
1223
		if ((pp & POWER_TARGET_ON) == 0)
1224
			intel_dp->last_power_cycle = jiffies;
1224
			intel_dp->last_power_cycle = jiffies;
1225
 
1225
 
1226
		power_domain = intel_display_port_power_domain(intel_encoder);
1226
		power_domain = intel_display_port_power_domain(intel_encoder);
1227
		intel_display_power_put(dev_priv, power_domain);
1227
		intel_display_power_put(dev_priv, power_domain);
1228
	}
1228
	}
1229
}
1229
}
1230
 
1230
 
1231
static void edp_panel_vdd_work(struct work_struct *__work)
1231
static void edp_panel_vdd_work(struct work_struct *__work)
1232
{
1232
{
1233
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1233
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1234
						 struct intel_dp, panel_vdd_work);
1234
						 struct intel_dp, panel_vdd_work);
1235
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1235
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1236
 
1236
 
1237
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1237
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
1238
	edp_panel_vdd_off_sync(intel_dp);
1238
	edp_panel_vdd_off_sync(intel_dp);
1239
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
1239
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
1240
}
1240
}
1241
 
1241
 
1242
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1242
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1243
{
1243
{
1244
	unsigned long delay;
1244
	unsigned long delay;
1245
 
1245
 
1246
	/*
1246
	/*
1247
	 * Queue the timer to fire a long time from now (relative to the power
1247
	 * Queue the timer to fire a long time from now (relative to the power
1248
	 * down delay) to keep the panel power up across a sequence of
1248
	 * down delay) to keep the panel power up across a sequence of
1249
	 * operations.
1249
	 * operations.
1250
	 */
1250
	 */
1251
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1251
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1252
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1252
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1253
}
1253
}
1254
 
1254
 
1255
/*
 * Release a VDD request; @sync chooses between dropping it immediately
 * and scheduling the deferred-off work.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
1269
 
1269
 
1270
void intel_edp_panel_on(struct intel_dp *intel_dp)
1270
void intel_edp_panel_on(struct intel_dp *intel_dp)
1271
{
1271
{
1272
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1272
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1273
	struct drm_i915_private *dev_priv = dev->dev_private;
1273
	struct drm_i915_private *dev_priv = dev->dev_private;
1274
	u32 pp;
1274
	u32 pp;
1275
	u32 pp_ctrl_reg;
1275
	u32 pp_ctrl_reg;
1276
 
1276
 
1277
	if (!is_edp(intel_dp))
1277
	if (!is_edp(intel_dp))
1278
		return;
1278
		return;
1279
 
1279
 
1280
	DRM_DEBUG_KMS("Turn eDP power on\n");
1280
	DRM_DEBUG_KMS("Turn eDP power on\n");
1281
 
1281
 
1282
	if (edp_have_panel_power(intel_dp)) {
1282
	if (edp_have_panel_power(intel_dp)) {
1283
		DRM_DEBUG_KMS("eDP power already on\n");
1283
		DRM_DEBUG_KMS("eDP power already on\n");
1284
		return;
1284
		return;
1285
	}
1285
	}
1286
 
1286
 
1287
	wait_panel_power_cycle(intel_dp);
1287
	wait_panel_power_cycle(intel_dp);
1288
 
1288
 
1289
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1289
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1290
	pp = ironlake_get_pp_control(intel_dp);
1290
	pp = ironlake_get_pp_control(intel_dp);
1291
	if (IS_GEN5(dev)) {
1291
	if (IS_GEN5(dev)) {
1292
	/* ILK workaround: disable reset around power sequence */
1292
	/* ILK workaround: disable reset around power sequence */
1293
	pp &= ~PANEL_POWER_RESET;
1293
	pp &= ~PANEL_POWER_RESET;
1294
		I915_WRITE(pp_ctrl_reg, pp);
1294
		I915_WRITE(pp_ctrl_reg, pp);
1295
		POSTING_READ(pp_ctrl_reg);
1295
		POSTING_READ(pp_ctrl_reg);
1296
	}
1296
	}
1297
 
1297
 
1298
	pp |= POWER_TARGET_ON;
1298
	pp |= POWER_TARGET_ON;
1299
	if (!IS_GEN5(dev))
1299
	if (!IS_GEN5(dev))
1300
		pp |= PANEL_POWER_RESET;
1300
		pp |= PANEL_POWER_RESET;
1301
 
1301
 
1302
	I915_WRITE(pp_ctrl_reg, pp);
1302
	I915_WRITE(pp_ctrl_reg, pp);
1303
	POSTING_READ(pp_ctrl_reg);
1303
	POSTING_READ(pp_ctrl_reg);
1304
 
1304
 
1305
	wait_panel_on(intel_dp);
1305
	wait_panel_on(intel_dp);
1306
	intel_dp->last_power_on = jiffies;
1306
	intel_dp->last_power_on = jiffies;
1307
 
1307
 
1308
	if (IS_GEN5(dev)) {
1308
	if (IS_GEN5(dev)) {
1309
	pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1309
	pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1310
		I915_WRITE(pp_ctrl_reg, pp);
1310
		I915_WRITE(pp_ctrl_reg, pp);
1311
		POSTING_READ(pp_ctrl_reg);
1311
		POSTING_READ(pp_ctrl_reg);
1312
	}
1312
	}
1313
}
1313
}
1314
 
1314
 
1315
void intel_edp_panel_off(struct intel_dp *intel_dp)
1315
void intel_edp_panel_off(struct intel_dp *intel_dp)
1316
{
1316
{
1317
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1317
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1318
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1318
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
1319
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1319
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1320
	struct drm_i915_private *dev_priv = dev->dev_private;
1320
	struct drm_i915_private *dev_priv = dev->dev_private;
1321
	enum intel_display_power_domain power_domain;
1321
	enum intel_display_power_domain power_domain;
1322
	u32 pp;
1322
	u32 pp;
1323
	u32 pp_ctrl_reg;
1323
	u32 pp_ctrl_reg;
1324
 
1324
 
1325
	if (!is_edp(intel_dp))
1325
	if (!is_edp(intel_dp))
1326
		return;
1326
		return;
1327
 
1327
 
1328
	DRM_DEBUG_KMS("Turn eDP power off\n");
1328
	DRM_DEBUG_KMS("Turn eDP power off\n");
1329
 
1329
 
1330
	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
1330
	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
1331
 
1331
 
1332
	pp = ironlake_get_pp_control(intel_dp);
1332
	pp = ironlake_get_pp_control(intel_dp);
1333
	/* We need to switch off panel power _and_ force vdd, for otherwise some
1333
	/* We need to switch off panel power _and_ force vdd, for otherwise some
1334
	 * panels get very unhappy and cease to work. */
1334
	 * panels get very unhappy and cease to work. */
1335
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1335
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1336
		EDP_BLC_ENABLE);
1336
		EDP_BLC_ENABLE);
1337
 
1337
 
1338
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1338
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1339
 
1339
 
1340
	intel_dp->want_panel_vdd = false;
1340
	intel_dp->want_panel_vdd = false;
1341
 
1341
 
1342
	I915_WRITE(pp_ctrl_reg, pp);
1342
	I915_WRITE(pp_ctrl_reg, pp);
1343
	POSTING_READ(pp_ctrl_reg);
1343
	POSTING_READ(pp_ctrl_reg);
1344
 
1344
 
1345
	intel_dp->last_power_cycle = jiffies;
1345
	intel_dp->last_power_cycle = jiffies;
1346
	wait_panel_off(intel_dp);
1346
	wait_panel_off(intel_dp);
1347
 
1347
 
1348
	/* We got a reference when we enabled the VDD. */
1348
	/* We got a reference when we enabled the VDD. */
1349
	power_domain = intel_display_port_power_domain(intel_encoder);
1349
	power_domain = intel_display_port_power_domain(intel_encoder);
1350
	intel_display_power_put(dev_priv, power_domain);
1350
	intel_display_power_put(dev_priv, power_domain);
1351
}
1351
}
1352
 
1352
 
1353
void intel_edp_backlight_on(struct intel_dp *intel_dp)
1353
void intel_edp_backlight_on(struct intel_dp *intel_dp)
1354
{
1354
{
1355
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1355
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1356
	struct drm_device *dev = intel_dig_port->base.base.dev;
1356
	struct drm_device *dev = intel_dig_port->base.base.dev;
1357
	struct drm_i915_private *dev_priv = dev->dev_private;
1357
	struct drm_i915_private *dev_priv = dev->dev_private;
1358
	u32 pp;
1358
	u32 pp;
1359
	u32 pp_ctrl_reg;
1359
	u32 pp_ctrl_reg;
1360
 
1360
 
1361
	if (!is_edp(intel_dp))
1361
	if (!is_edp(intel_dp))
1362
		return;
1362
		return;
1363
 
1363
 
1364
	DRM_DEBUG_KMS("\n");
1364
	DRM_DEBUG_KMS("\n");
1365
 
1365
 
1366
	intel_panel_enable_backlight(intel_dp->attached_connector);
1366
	intel_panel_enable_backlight(intel_dp->attached_connector);
1367
 
1367
 
1368
	/*
1368
	/*
1369
	 * If we enable the backlight right away following a panel power
1369
	 * If we enable the backlight right away following a panel power
1370
	 * on, we may see slight flicker as the panel syncs with the eDP
1370
	 * on, we may see slight flicker as the panel syncs with the eDP
1371
	 * link.  So delay a bit to make sure the image is solid before
1371
	 * link.  So delay a bit to make sure the image is solid before
1372
	 * allowing it to appear.
1372
	 * allowing it to appear.
1373
	 */
1373
	 */
1374
	wait_backlight_on(intel_dp);
1374
	wait_backlight_on(intel_dp);
1375
	pp = ironlake_get_pp_control(intel_dp);
1375
	pp = ironlake_get_pp_control(intel_dp);
1376
	pp |= EDP_BLC_ENABLE;
1376
	pp |= EDP_BLC_ENABLE;
1377
 
1377
 
1378
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1378
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1379
 
1379
 
1380
	I915_WRITE(pp_ctrl_reg, pp);
1380
	I915_WRITE(pp_ctrl_reg, pp);
1381
	POSTING_READ(pp_ctrl_reg);
1381
	POSTING_READ(pp_ctrl_reg);
1382
}
1382
}
1383
 
1383
 
1384
void intel_edp_backlight_off(struct intel_dp *intel_dp)
1384
void intel_edp_backlight_off(struct intel_dp *intel_dp)
1385
{
1385
{
1386
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1386
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1387
	struct drm_i915_private *dev_priv = dev->dev_private;
1387
	struct drm_i915_private *dev_priv = dev->dev_private;
1388
	u32 pp;
1388
	u32 pp;
1389
	u32 pp_ctrl_reg;
1389
	u32 pp_ctrl_reg;
1390
 
1390
 
1391
	if (!is_edp(intel_dp))
1391
	if (!is_edp(intel_dp))
1392
		return;
1392
		return;
1393
 
1393
 
1394
	DRM_DEBUG_KMS("\n");
1394
	DRM_DEBUG_KMS("\n");
1395
	pp = ironlake_get_pp_control(intel_dp);
1395
	pp = ironlake_get_pp_control(intel_dp);
1396
	pp &= ~EDP_BLC_ENABLE;
1396
	pp &= ~EDP_BLC_ENABLE;
1397
 
1397
 
1398
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1398
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1399
 
1399
 
1400
	I915_WRITE(pp_ctrl_reg, pp);
1400
	I915_WRITE(pp_ctrl_reg, pp);
1401
	POSTING_READ(pp_ctrl_reg);
1401
	POSTING_READ(pp_ctrl_reg);
1402
	intel_dp->last_backlight_off = jiffies;
1402
	intel_dp->last_backlight_off = jiffies;
1403
 
1403
 
1404
	edp_wait_backlight_off(intel_dp);
1404
	edp_wait_backlight_off(intel_dp);
1405
 
1405
 
1406
	intel_panel_disable_backlight(intel_dp->attached_connector);
1406
	intel_panel_disable_backlight(intel_dp->attached_connector);
1407
}
1407
}
1408
 
1408
 
1409
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
1409
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
1410
{
1410
{
1411
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1411
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1412
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1412
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1413
	struct drm_device *dev = crtc->dev;
1413
	struct drm_device *dev = crtc->dev;
1414
	struct drm_i915_private *dev_priv = dev->dev_private;
1414
	struct drm_i915_private *dev_priv = dev->dev_private;
1415
	u32 dpa_ctl;
1415
	u32 dpa_ctl;
1416
 
1416
 
1417
	assert_pipe_disabled(dev_priv,
1417
	assert_pipe_disabled(dev_priv,
1418
			     to_intel_crtc(crtc)->pipe);
1418
			     to_intel_crtc(crtc)->pipe);
1419
 
1419
 
1420
	DRM_DEBUG_KMS("\n");
1420
	DRM_DEBUG_KMS("\n");
1421
	dpa_ctl = I915_READ(DP_A);
1421
	dpa_ctl = I915_READ(DP_A);
1422
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
1422
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
1423
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1423
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1424
 
1424
 
1425
	/* We don't adjust intel_dp->DP while tearing down the link, to
1425
	/* We don't adjust intel_dp->DP while tearing down the link, to
1426
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
1426
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
1427
	 * enable bits here to ensure that we don't enable too much. */
1427
	 * enable bits here to ensure that we don't enable too much. */
1428
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
1428
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
1429
	intel_dp->DP |= DP_PLL_ENABLE;
1429
	intel_dp->DP |= DP_PLL_ENABLE;
1430
	I915_WRITE(DP_A, intel_dp->DP);
1430
	I915_WRITE(DP_A, intel_dp->DP);
1431
	POSTING_READ(DP_A);
1431
	POSTING_READ(DP_A);
1432
	udelay(200);
1432
	udelay(200);
1433
}
1433
}
1434
 
1434
 
1435
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
1435
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
1436
{
1436
{
1437
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1437
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1438
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1438
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1439
	struct drm_device *dev = crtc->dev;
1439
	struct drm_device *dev = crtc->dev;
1440
	struct drm_i915_private *dev_priv = dev->dev_private;
1440
	struct drm_i915_private *dev_priv = dev->dev_private;
1441
	u32 dpa_ctl;
1441
	u32 dpa_ctl;
1442
 
1442
 
1443
	assert_pipe_disabled(dev_priv,
1443
	assert_pipe_disabled(dev_priv,
1444
			     to_intel_crtc(crtc)->pipe);
1444
			     to_intel_crtc(crtc)->pipe);
1445
 
1445
 
1446
	dpa_ctl = I915_READ(DP_A);
1446
	dpa_ctl = I915_READ(DP_A);
1447
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
1447
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
1448
	     "dp pll off, should be on\n");
1448
	     "dp pll off, should be on\n");
1449
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1449
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1450
 
1450
 
1451
	/* We can't rely on the value tracked for the DP register in
1451
	/* We can't rely on the value tracked for the DP register in
1452
	 * intel_dp->DP because link_down must not change that (otherwise link
1452
	 * intel_dp->DP because link_down must not change that (otherwise link
1453
	 * re-training will fail. */
1453
	 * re-training will fail. */
1454
	dpa_ctl &= ~DP_PLL_ENABLE;
1454
	dpa_ctl &= ~DP_PLL_ENABLE;
1455
	I915_WRITE(DP_A, dpa_ctl);
1455
	I915_WRITE(DP_A, dpa_ctl);
1456
	POSTING_READ(DP_A);
1456
	POSTING_READ(DP_A);
1457
	udelay(200);
1457
	udelay(200);
1458
}
1458
}
1459
 
1459
 
1460
/* If the sink supports it, try to set the power state appropriately */
1460
/* If the sink supports it, try to set the power state appropriately */
1461
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1461
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1462
{
1462
{
1463
	int ret, i;
1463
	int ret, i;
1464
 
1464
 
1465
	/* Should have a valid DPCD by this point */
1465
	/* Should have a valid DPCD by this point */
1466
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1466
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1467
		return;
1467
		return;
1468
 
1468
 
1469
	if (mode != DRM_MODE_DPMS_ON) {
1469
	if (mode != DRM_MODE_DPMS_ON) {
1470
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
1470
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
1471
						  DP_SET_POWER_D3);
1471
						  DP_SET_POWER_D3);
1472
		if (ret != 1)
1472
		if (ret != 1)
1473
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
1473
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
1474
	} else {
1474
	} else {
1475
		/*
1475
		/*
1476
		 * When turning on, we need to retry for 1ms to give the sink
1476
		 * When turning on, we need to retry for 1ms to give the sink
1477
		 * time to wake up.
1477
		 * time to wake up.
1478
		 */
1478
		 */
1479
		for (i = 0; i < 3; i++) {
1479
		for (i = 0; i < 3; i++) {
1480
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
1480
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
1481
							  DP_SET_POWER_D0);
1481
							  DP_SET_POWER_D0);
1482
			if (ret == 1)
1482
			if (ret == 1)
1483
				break;
1483
				break;
1484
			msleep(1);
1484
			msleep(1);
1485
		}
1485
		}
1486
	}
1486
	}
1487
}
1487
}
1488
 
1488
 
1489
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1489
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1490
				  enum pipe *pipe)
1490
				  enum pipe *pipe)
1491
{
1491
{
1492
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1492
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1493
	enum port port = dp_to_dig_port(intel_dp)->port;
1493
	enum port port = dp_to_dig_port(intel_dp)->port;
1494
	struct drm_device *dev = encoder->base.dev;
1494
	struct drm_device *dev = encoder->base.dev;
1495
	struct drm_i915_private *dev_priv = dev->dev_private;
1495
	struct drm_i915_private *dev_priv = dev->dev_private;
1496
	enum intel_display_power_domain power_domain;
1496
	enum intel_display_power_domain power_domain;
1497
	u32 tmp;
1497
	u32 tmp;
1498
 
1498
 
1499
	power_domain = intel_display_port_power_domain(encoder);
1499
	power_domain = intel_display_port_power_domain(encoder);
1500
	if (!intel_display_power_enabled(dev_priv, power_domain))
1500
	if (!intel_display_power_enabled(dev_priv, power_domain))
1501
		return false;
1501
		return false;
1502
 
1502
 
1503
	tmp = I915_READ(intel_dp->output_reg);
1503
	tmp = I915_READ(intel_dp->output_reg);
1504
 
1504
 
1505
	if (!(tmp & DP_PORT_EN))
1505
	if (!(tmp & DP_PORT_EN))
1506
		return false;
1506
		return false;
1507
 
1507
 
1508
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1508
	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1509
		*pipe = PORT_TO_PIPE_CPT(tmp);
1509
		*pipe = PORT_TO_PIPE_CPT(tmp);
1510
	} else if (IS_CHERRYVIEW(dev)) {
1510
	} else if (IS_CHERRYVIEW(dev)) {
1511
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
1511
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
1512
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1512
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
1513
		*pipe = PORT_TO_PIPE(tmp);
1513
		*pipe = PORT_TO_PIPE(tmp);
1514
	} else {
1514
	} else {
1515
		u32 trans_sel;
1515
		u32 trans_sel;
1516
		u32 trans_dp;
1516
		u32 trans_dp;
1517
		int i;
1517
		int i;
1518
 
1518
 
1519
		switch (intel_dp->output_reg) {
1519
		switch (intel_dp->output_reg) {
1520
		case PCH_DP_B:
1520
		case PCH_DP_B:
1521
			trans_sel = TRANS_DP_PORT_SEL_B;
1521
			trans_sel = TRANS_DP_PORT_SEL_B;
1522
			break;
1522
			break;
1523
		case PCH_DP_C:
1523
		case PCH_DP_C:
1524
			trans_sel = TRANS_DP_PORT_SEL_C;
1524
			trans_sel = TRANS_DP_PORT_SEL_C;
1525
			break;
1525
			break;
1526
		case PCH_DP_D:
1526
		case PCH_DP_D:
1527
			trans_sel = TRANS_DP_PORT_SEL_D;
1527
			trans_sel = TRANS_DP_PORT_SEL_D;
1528
			break;
1528
			break;
1529
		default:
1529
		default:
1530
			return true;
1530
			return true;
1531
		}
1531
		}
1532
 
1532
 
1533
		for_each_pipe(i) {
1533
		for_each_pipe(i) {
1534
			trans_dp = I915_READ(TRANS_DP_CTL(i));
1534
			trans_dp = I915_READ(TRANS_DP_CTL(i));
1535
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1535
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1536
				*pipe = i;
1536
				*pipe = i;
1537
				return true;
1537
				return true;
1538
			}
1538
			}
1539
		}
1539
		}
1540
 
1540
 
1541
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
1541
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
1542
			      intel_dp->output_reg);
1542
			      intel_dp->output_reg);
1543
	}
1543
	}
1544
 
1544
 
1545
	return true;
1545
	return true;
1546
}
1546
}
1547
 
1547
 
1548
static void intel_dp_get_config(struct intel_encoder *encoder,
1548
static void intel_dp_get_config(struct intel_encoder *encoder,
1549
				struct intel_crtc_config *pipe_config)
1549
				struct intel_crtc_config *pipe_config)
1550
{
1550
{
1551
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1551
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1552
	u32 tmp, flags = 0;
1552
	u32 tmp, flags = 0;
1553
	struct drm_device *dev = encoder->base.dev;
1553
	struct drm_device *dev = encoder->base.dev;
1554
	struct drm_i915_private *dev_priv = dev->dev_private;
1554
	struct drm_i915_private *dev_priv = dev->dev_private;
1555
	enum port port = dp_to_dig_port(intel_dp)->port;
1555
	enum port port = dp_to_dig_port(intel_dp)->port;
1556
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1556
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1557
	int dotclock;
1557
	int dotclock;
1558
 
1558
 
1559
	tmp = I915_READ(intel_dp->output_reg);
1559
	tmp = I915_READ(intel_dp->output_reg);
1560
	if (tmp & DP_AUDIO_OUTPUT_ENABLE)
1560
	if (tmp & DP_AUDIO_OUTPUT_ENABLE)
1561
		pipe_config->has_audio = true;
1561
		pipe_config->has_audio = true;
1562
 
1562
 
1563
	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
1563
	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
1564
		if (tmp & DP_SYNC_HS_HIGH)
1564
		if (tmp & DP_SYNC_HS_HIGH)
1565
			flags |= DRM_MODE_FLAG_PHSYNC;
1565
			flags |= DRM_MODE_FLAG_PHSYNC;
1566
		else
1566
		else
1567
			flags |= DRM_MODE_FLAG_NHSYNC;
1567
			flags |= DRM_MODE_FLAG_NHSYNC;
1568
 
1568
 
1569
		if (tmp & DP_SYNC_VS_HIGH)
1569
		if (tmp & DP_SYNC_VS_HIGH)
1570
			flags |= DRM_MODE_FLAG_PVSYNC;
1570
			flags |= DRM_MODE_FLAG_PVSYNC;
1571
		else
1571
		else
1572
			flags |= DRM_MODE_FLAG_NVSYNC;
1572
			flags |= DRM_MODE_FLAG_NVSYNC;
1573
	} else {
1573
	} else {
1574
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1574
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1575
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
1575
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
1576
			flags |= DRM_MODE_FLAG_PHSYNC;
1576
			flags |= DRM_MODE_FLAG_PHSYNC;
1577
		else
1577
		else
1578
			flags |= DRM_MODE_FLAG_NHSYNC;
1578
			flags |= DRM_MODE_FLAG_NHSYNC;
1579
 
1579
 
1580
		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
1580
		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
1581
			flags |= DRM_MODE_FLAG_PVSYNC;
1581
			flags |= DRM_MODE_FLAG_PVSYNC;
1582
		else
1582
		else
1583
			flags |= DRM_MODE_FLAG_NVSYNC;
1583
			flags |= DRM_MODE_FLAG_NVSYNC;
1584
	}
1584
	}
1585
 
1585
 
1586
	pipe_config->adjusted_mode.flags |= flags;
1586
	pipe_config->adjusted_mode.flags |= flags;
-
 
1587
 
-
 
1588
	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
-
 
1589
	    tmp & DP_COLOR_RANGE_16_235)
-
 
1590
		pipe_config->limited_color_range = true;
1587
 
1591
 
1588
	pipe_config->has_dp_encoder = true;
1592
	pipe_config->has_dp_encoder = true;
1589
 
1593
 
1590
	intel_dp_get_m_n(crtc, pipe_config);
1594
	intel_dp_get_m_n(crtc, pipe_config);
1591
 
1595
 
1592
	if (port == PORT_A) {
1596
	if (port == PORT_A) {
1593
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
1597
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
1594
			pipe_config->port_clock = 162000;
1598
			pipe_config->port_clock = 162000;
1595
		else
1599
		else
1596
			pipe_config->port_clock = 270000;
1600
			pipe_config->port_clock = 270000;
1597
	}
1601
	}
1598
 
1602
 
1599
	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
1603
	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
1600
					    &pipe_config->dp_m_n);
1604
					    &pipe_config->dp_m_n);
1601
 
1605
 
1602
	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
1606
	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
1603
		ironlake_check_encoder_dotclock(pipe_config, dotclock);
1607
		ironlake_check_encoder_dotclock(pipe_config, dotclock);
1604
 
1608
 
1605
	pipe_config->adjusted_mode.crtc_clock = dotclock;
1609
	pipe_config->adjusted_mode.crtc_clock = dotclock;
1606
 
1610
 
1607
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
1611
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
1608
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
1612
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
1609
		/*
1613
		/*
1610
		 * This is a big fat ugly hack.
1614
		 * This is a big fat ugly hack.
1611
		 *
1615
		 *
1612
		 * Some machines in UEFI boot mode provide us a VBT that has 18
1616
		 * Some machines in UEFI boot mode provide us a VBT that has 18
1613
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
1617
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
1614
		 * unknown we fail to light up. Yet the same BIOS boots up with
1618
		 * unknown we fail to light up. Yet the same BIOS boots up with
1615
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
1619
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
1616
		 * max, not what it tells us to use.
1620
		 * max, not what it tells us to use.
1617
		 *
1621
		 *
1618
		 * Note: This will still be broken if the eDP panel is not lit
1622
		 * Note: This will still be broken if the eDP panel is not lit
1619
		 * up by the BIOS, and thus we can't get the mode at module
1623
		 * up by the BIOS, and thus we can't get the mode at module
1620
		 * load.
1624
		 * load.
1621
		 */
1625
		 */
1622
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
1626
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
1623
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
1627
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
1624
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1628
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1625
	}
1629
	}
1626
}
1630
}
1627
 
1631
 
1628
static bool is_edp_psr(struct intel_dp *intel_dp)
1632
static bool is_edp_psr(struct intel_dp *intel_dp)
1629
{
1633
{
1630
	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
1634
	return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
1631
}
1635
}
1632
 
1636
 
1633
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1637
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1634
{
1638
{
1635
	struct drm_i915_private *dev_priv = dev->dev_private;
1639
	struct drm_i915_private *dev_priv = dev->dev_private;
1636
 
1640
 
1637
	if (!HAS_PSR(dev))
1641
	if (!HAS_PSR(dev))
1638
		return false;
1642
		return false;
1639
 
1643
 
1640
	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1644
	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1641
}
1645
}
1642
 
1646
 
1643
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
1647
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
1644
				    struct edp_vsc_psr *vsc_psr)
1648
				    struct edp_vsc_psr *vsc_psr)
1645
{
1649
{
1646
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1650
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1647
	struct drm_device *dev = dig_port->base.base.dev;
1651
	struct drm_device *dev = dig_port->base.base.dev;
1648
	struct drm_i915_private *dev_priv = dev->dev_private;
1652
	struct drm_i915_private *dev_priv = dev->dev_private;
1649
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1653
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1650
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
1654
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
1651
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
1655
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
1652
	uint32_t *data = (uint32_t *) vsc_psr;
1656
	uint32_t *data = (uint32_t *) vsc_psr;
1653
	unsigned int i;
1657
	unsigned int i;
1654
 
1658
 
1655
	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
1659
	/* As per BSPec (Pipe Video Data Island Packet), we need to disable
1656
	   the video DIP being updated before program video DIP data buffer
1660
	   the video DIP being updated before program video DIP data buffer
1657
	   registers for DIP being updated. */
1661
	   registers for DIP being updated. */
1658
	I915_WRITE(ctl_reg, 0);
1662
	I915_WRITE(ctl_reg, 0);
1659
	POSTING_READ(ctl_reg);
1663
	POSTING_READ(ctl_reg);
1660
 
1664
 
1661
	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
1665
	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
1662
		if (i < sizeof(struct edp_vsc_psr))
1666
		if (i < sizeof(struct edp_vsc_psr))
1663
			I915_WRITE(data_reg + i, *data++);
1667
			I915_WRITE(data_reg + i, *data++);
1664
		else
1668
		else
1665
			I915_WRITE(data_reg + i, 0);
1669
			I915_WRITE(data_reg + i, 0);
1666
	}
1670
	}
1667
 
1671
 
1668
	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
1672
	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
1669
	POSTING_READ(ctl_reg);
1673
	POSTING_READ(ctl_reg);
1670
}
1674
}
1671
 
1675
 
1672
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1676
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1673
{
1677
{
1674
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1678
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1675
	struct drm_i915_private *dev_priv = dev->dev_private;
1679
	struct drm_i915_private *dev_priv = dev->dev_private;
1676
	struct edp_vsc_psr psr_vsc;
1680
	struct edp_vsc_psr psr_vsc;
1677
 
1681
 
1678
	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
1682
	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
1679
	memset(&psr_vsc, 0, sizeof(psr_vsc));
1683
	memset(&psr_vsc, 0, sizeof(psr_vsc));
1680
	psr_vsc.sdp_header.HB0 = 0;
1684
	psr_vsc.sdp_header.HB0 = 0;
1681
	psr_vsc.sdp_header.HB1 = 0x7;
1685
	psr_vsc.sdp_header.HB1 = 0x7;
1682
	psr_vsc.sdp_header.HB2 = 0x2;
1686
	psr_vsc.sdp_header.HB2 = 0x2;
1683
	psr_vsc.sdp_header.HB3 = 0x8;
1687
	psr_vsc.sdp_header.HB3 = 0x8;
1684
	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
1688
	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
1685
 
1689
 
1686
	/* Avoid continuous PSR exit by masking memup and hpd */
1690
	/* Avoid continuous PSR exit by masking memup and hpd */
1687
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
1691
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
1688
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
1692
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
1689
}
1693
}
1690
 
1694
 
1691
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1695
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1692
{
1696
{
1693
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1697
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1694
	struct drm_device *dev = dig_port->base.base.dev;
1698
	struct drm_device *dev = dig_port->base.base.dev;
1695
	struct drm_i915_private *dev_priv = dev->dev_private;
1699
	struct drm_i915_private *dev_priv = dev->dev_private;
1696
	uint32_t aux_clock_divider;
1700
	uint32_t aux_clock_divider;
1697
	int precharge = 0x3;
1701
	int precharge = 0x3;
1698
	int msg_size = 5;       /* Header(4) + Message(1) */
1702
	int msg_size = 5;       /* Header(4) + Message(1) */
1699
	bool only_standby = false;
1703
	bool only_standby = false;
1700
 
1704
 
1701
	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
1705
	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
1702
 
1706
 
1703
	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
1707
	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
1704
		only_standby = true;
1708
		only_standby = true;
1705
 
1709
 
1706
	/* Enable PSR in sink */
1710
	/* Enable PSR in sink */
1707
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
1711
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
1708
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
1712
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
1709
				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
1713
				   DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
1710
	else
1714
	else
1711
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
1715
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
1712
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
1716
				   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
1713
 
1717
 
1714
	/* Setup AUX registers */
1718
	/* Setup AUX registers */
1715
	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
1719
	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
1716
	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
1720
	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
1717
	I915_WRITE(EDP_PSR_AUX_CTL(dev),
1721
	I915_WRITE(EDP_PSR_AUX_CTL(dev),
1718
		   DP_AUX_CH_CTL_TIME_OUT_400us |
1722
		   DP_AUX_CH_CTL_TIME_OUT_400us |
1719
		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1723
		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1720
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1724
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1721
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
1725
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
1722
}
1726
}
1723
 
1727
 
1724
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1728
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1725
{
1729
{
1726
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1730
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1727
	struct drm_device *dev = dig_port->base.base.dev;
1731
	struct drm_device *dev = dig_port->base.base.dev;
1728
	struct drm_i915_private *dev_priv = dev->dev_private;
1732
	struct drm_i915_private *dev_priv = dev->dev_private;
1729
	uint32_t max_sleep_time = 0x1f;
1733
	uint32_t max_sleep_time = 0x1f;
1730
	uint32_t idle_frames = 1;
1734
	uint32_t idle_frames = 1;
1731
	uint32_t val = 0x0;
1735
	uint32_t val = 0x0;
1732
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
1736
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
1733
	bool only_standby = false;
1737
	bool only_standby = false;
1734
 
1738
 
1735
	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
1739
	if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
1736
		only_standby = true;
1740
		only_standby = true;
1737
 
1741
 
1738
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
1742
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
1739
		val |= EDP_PSR_LINK_STANDBY;
1743
		val |= EDP_PSR_LINK_STANDBY;
1740
		val |= EDP_PSR_TP2_TP3_TIME_0us;
1744
		val |= EDP_PSR_TP2_TP3_TIME_0us;
1741
		val |= EDP_PSR_TP1_TIME_0us;
1745
		val |= EDP_PSR_TP1_TIME_0us;
1742
		val |= EDP_PSR_SKIP_AUX_EXIT;
1746
		val |= EDP_PSR_SKIP_AUX_EXIT;
1743
		val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
1747
		val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
1744
	} else
1748
	} else
1745
		val |= EDP_PSR_LINK_DISABLE;
1749
		val |= EDP_PSR_LINK_DISABLE;
1746
 
1750
 
1747
	I915_WRITE(EDP_PSR_CTL(dev), val |
1751
	I915_WRITE(EDP_PSR_CTL(dev), val |
1748
		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
1752
		   (IS_BROADWELL(dev) ? 0 : link_entry_time) |
1749
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1753
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1750
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1754
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1751
		   EDP_PSR_ENABLE);
1755
		   EDP_PSR_ENABLE);
1752
}
1756
}
1753
 
1757
 
1754
static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1758
static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1755
{
1759
{
1756
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1760
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1757
	struct drm_device *dev = dig_port->base.base.dev;
1761
	struct drm_device *dev = dig_port->base.base.dev;
1758
	struct drm_i915_private *dev_priv = dev->dev_private;
1762
	struct drm_i915_private *dev_priv = dev->dev_private;
1759
	struct drm_crtc *crtc = dig_port->base.base.crtc;
1763
	struct drm_crtc *crtc = dig_port->base.base.crtc;
1760
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1764
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1761
 
1765
 
1762
	lockdep_assert_held(&dev_priv->psr.lock);
1766
	lockdep_assert_held(&dev_priv->psr.lock);
1763
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
1767
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
1764
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
1768
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
1765
 
1769
 
1766
	dev_priv->psr.source_ok = false;
1770
	dev_priv->psr.source_ok = false;
1767
 
1771
 
1768
	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
1772
	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
1769
		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1773
		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1770
		return false;
1774
		return false;
1771
	}
1775
	}
1772
 
1776
 
1773
	if (!i915.enable_psr) {
1777
	if (!i915.enable_psr) {
1774
		DRM_DEBUG_KMS("PSR disable by flag\n");
1778
		DRM_DEBUG_KMS("PSR disable by flag\n");
1775
		return false;
1779
		return false;
1776
	}
1780
	}
1777
 
1781
 
1778
	/* Below limitations aren't valid for Broadwell */
1782
	/* Below limitations aren't valid for Broadwell */
1779
	if (IS_BROADWELL(dev))
1783
	if (IS_BROADWELL(dev))
1780
		goto out;
1784
		goto out;
1781
 
1785
 
1782
	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1786
	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1783
	    S3D_ENABLE) {
1787
	    S3D_ENABLE) {
1784
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1788
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1785
		return false;
1789
		return false;
1786
	}
1790
	}
1787
 
1791
 
1788
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1792
	if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1789
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1793
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1790
		return false;
1794
		return false;
1791
	}
1795
	}
1792
 
1796
 
1793
 out:
1797
 out:
1794
	dev_priv->psr.source_ok = true;
1798
	dev_priv->psr.source_ok = true;
1795
	return true;
1799
	return true;
1796
}
1800
}
1797
 
1801
 
1798
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
1802
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
1799
{
1803
{
1800
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1804
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1801
	struct drm_device *dev = intel_dig_port->base.base.dev;
1805
	struct drm_device *dev = intel_dig_port->base.base.dev;
1802
	struct drm_i915_private *dev_priv = dev->dev_private;
1806
	struct drm_i915_private *dev_priv = dev->dev_private;
1803
 
1807
 
1804
	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
1808
	WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
1805
	WARN_ON(dev_priv->psr.active);
1809
	WARN_ON(dev_priv->psr.active);
1806
	lockdep_assert_held(&dev_priv->psr.lock);
1810
	lockdep_assert_held(&dev_priv->psr.lock);
1807
 
1811
 
1808
	/* Enable PSR on the panel */
1812
	/* Enable PSR on the panel */
1809
	intel_edp_psr_enable_sink(intel_dp);
1813
	intel_edp_psr_enable_sink(intel_dp);
1810
 
1814
 
1811
	/* Enable PSR on the host */
1815
	/* Enable PSR on the host */
1812
	intel_edp_psr_enable_source(intel_dp);
1816
	intel_edp_psr_enable_source(intel_dp);
1813
 
1817
 
1814
	dev_priv->psr.active = true;
1818
	dev_priv->psr.active = true;
1815
}
1819
}
1816
 
1820
 
1817
void intel_edp_psr_enable(struct intel_dp *intel_dp)
1821
void intel_edp_psr_enable(struct intel_dp *intel_dp)
1818
{
1822
{
1819
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1823
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1820
	struct drm_i915_private *dev_priv = dev->dev_private;
1824
	struct drm_i915_private *dev_priv = dev->dev_private;
1821
 
1825
 
1822
	if (!HAS_PSR(dev)) {
1826
	if (!HAS_PSR(dev)) {
1823
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
1827
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
1824
		return;
1828
		return;
1825
	}
1829
	}
1826
 
1830
 
1827
	if (!is_edp_psr(intel_dp)) {
1831
	if (!is_edp_psr(intel_dp)) {
1828
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
1832
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
1829
		return;
1833
		return;
1830
	}
1834
	}
1831
 
1835
 
1832
	mutex_lock(&dev_priv->psr.lock);
1836
	mutex_lock(&dev_priv->psr.lock);
1833
	if (dev_priv->psr.enabled) {
1837
	if (dev_priv->psr.enabled) {
1834
		DRM_DEBUG_KMS("PSR already in use\n");
1838
		DRM_DEBUG_KMS("PSR already in use\n");
1835
		mutex_unlock(&dev_priv->psr.lock);
1839
		mutex_unlock(&dev_priv->psr.lock);
1836
		return;
1840
		return;
1837
	}
1841
	}
1838
 
1842
 
1839
	dev_priv->psr.busy_frontbuffer_bits = 0;
1843
	dev_priv->psr.busy_frontbuffer_bits = 0;
1840
 
1844
 
1841
	/* Setup PSR once */
1845
	/* Setup PSR once */
1842
	intel_edp_psr_setup(intel_dp);
1846
	intel_edp_psr_setup(intel_dp);
1843
 
1847
 
1844
	if (intel_edp_psr_match_conditions(intel_dp))
1848
	if (intel_edp_psr_match_conditions(intel_dp))
1845
		dev_priv->psr.enabled = intel_dp;
1849
		dev_priv->psr.enabled = intel_dp;
1846
	mutex_unlock(&dev_priv->psr.lock);
1850
	mutex_unlock(&dev_priv->psr.lock);
1847
}
1851
}
1848
 
1852
 
1849
void intel_edp_psr_disable(struct intel_dp *intel_dp)
1853
void intel_edp_psr_disable(struct intel_dp *intel_dp)
1850
{
1854
{
1851
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1855
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
1852
	struct drm_i915_private *dev_priv = dev->dev_private;
1856
	struct drm_i915_private *dev_priv = dev->dev_private;
1853
 
1857
 
1854
	mutex_lock(&dev_priv->psr.lock);
1858
	mutex_lock(&dev_priv->psr.lock);
1855
	if (!dev_priv->psr.enabled) {
1859
	if (!dev_priv->psr.enabled) {
1856
		mutex_unlock(&dev_priv->psr.lock);
1860
		mutex_unlock(&dev_priv->psr.lock);
1857
		return;
1861
		return;
1858
	}
1862
	}
1859
 
1863
 
1860
	if (dev_priv->psr.active) {
1864
	if (dev_priv->psr.active) {
1861
	I915_WRITE(EDP_PSR_CTL(dev),
1865
	I915_WRITE(EDP_PSR_CTL(dev),
1862
		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
1866
		   I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
1863
 
1867
 
1864
	/* Wait till PSR is idle */
1868
	/* Wait till PSR is idle */
1865
	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
1869
	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
1866
		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1870
		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1867
		DRM_ERROR("Timed out waiting for PSR Idle State\n");
1871
		DRM_ERROR("Timed out waiting for PSR Idle State\n");
1868
 
1872
 
1869
		dev_priv->psr.active = false;
1873
		dev_priv->psr.active = false;
1870
	} else {
1874
	} else {
1871
		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
1875
		WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
1872
	}
1876
	}
1873
 
1877
 
1874
	dev_priv->psr.enabled = NULL;
1878
	dev_priv->psr.enabled = NULL;
1875
	mutex_unlock(&dev_priv->psr.lock);
1879
	mutex_unlock(&dev_priv->psr.lock);
1876
 
1880
 
1877
	cancel_delayed_work_sync(&dev_priv->psr.work);
1881
	cancel_delayed_work_sync(&dev_priv->psr.work);
1878
}
1882
}
1879
 
1883
 
1880
static void intel_edp_psr_work(struct work_struct *work)
1884
static void intel_edp_psr_work(struct work_struct *work)
1881
{
1885
{
1882
	struct drm_i915_private *dev_priv =
1886
	struct drm_i915_private *dev_priv =
1883
		container_of(work, typeof(*dev_priv), psr.work.work);
1887
		container_of(work, typeof(*dev_priv), psr.work.work);
1884
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
1888
	struct intel_dp *intel_dp = dev_priv->psr.enabled;
1885
 
1889
 
1886
	mutex_lock(&dev_priv->psr.lock);
1890
	mutex_lock(&dev_priv->psr.lock);
1887
	intel_dp = dev_priv->psr.enabled;
1891
	intel_dp = dev_priv->psr.enabled;
1888
 
1892
 
1889
	if (!intel_dp)
1893
	if (!intel_dp)
1890
		goto unlock;
1894
		goto unlock;
1891
 
1895
 
1892
	/*
1896
	/*
1893
	 * The delayed work can race with an invalidate hence we need to
1897
	 * The delayed work can race with an invalidate hence we need to
1894
	 * recheck. Since psr_flush first clears this and then reschedules we
1898
	 * recheck. Since psr_flush first clears this and then reschedules we
1895
	 * won't ever miss a flush when bailing out here.
1899
	 * won't ever miss a flush when bailing out here.
1896
	 */
1900
	 */
1897
	if (dev_priv->psr.busy_frontbuffer_bits)
1901
	if (dev_priv->psr.busy_frontbuffer_bits)
1898
		goto unlock;
1902
		goto unlock;
1899
 
1903
 
1900
	intel_edp_psr_do_enable(intel_dp);
1904
	intel_edp_psr_do_enable(intel_dp);
1901
unlock:
1905
unlock:
1902
	mutex_unlock(&dev_priv->psr.lock);
1906
	mutex_unlock(&dev_priv->psr.lock);
1903
}
1907
}
1904
 
1908
 
1905
static void intel_edp_psr_do_exit(struct drm_device *dev)
1909
static void intel_edp_psr_do_exit(struct drm_device *dev)
1906
{
1910
{
1907
	struct drm_i915_private *dev_priv = dev->dev_private;
1911
	struct drm_i915_private *dev_priv = dev->dev_private;
1908
 
1912
 
1909
	if (dev_priv->psr.active) {
1913
	if (dev_priv->psr.active) {
1910
		u32 val = I915_READ(EDP_PSR_CTL(dev));
1914
		u32 val = I915_READ(EDP_PSR_CTL(dev));
1911
 
1915
 
1912
		WARN_ON(!(val & EDP_PSR_ENABLE));
1916
		WARN_ON(!(val & EDP_PSR_ENABLE));
1913
 
1917
 
1914
		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
1918
		I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
1915
 
1919
 
1916
		dev_priv->psr.active = false;
1920
		dev_priv->psr.active = false;
1917
	}
1921
	}
1918
 
1922
 
1919
}
1923
}
1920
 
1924
 
1921
void intel_edp_psr_invalidate(struct drm_device *dev,
1925
void intel_edp_psr_invalidate(struct drm_device *dev,
1922
			      unsigned frontbuffer_bits)
1926
			      unsigned frontbuffer_bits)
1923
{
1927
{
1924
	struct drm_i915_private *dev_priv = dev->dev_private;
1928
	struct drm_i915_private *dev_priv = dev->dev_private;
1925
	struct drm_crtc *crtc;
1929
	struct drm_crtc *crtc;
1926
	enum pipe pipe;
1930
	enum pipe pipe;
1927
 
1931
 
1928
	mutex_lock(&dev_priv->psr.lock);
1932
	mutex_lock(&dev_priv->psr.lock);
1929
	if (!dev_priv->psr.enabled) {
1933
	if (!dev_priv->psr.enabled) {
1930
		mutex_unlock(&dev_priv->psr.lock);
1934
		mutex_unlock(&dev_priv->psr.lock);
1931
				return;
1935
				return;
1932
	}
1936
	}
1933
 
1937
 
1934
	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
1938
	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
1935
	pipe = to_intel_crtc(crtc)->pipe;
1939
	pipe = to_intel_crtc(crtc)->pipe;
1936
 
1940
 
1937
	intel_edp_psr_do_exit(dev);
1941
	intel_edp_psr_do_exit(dev);
1938
 
1942
 
1939
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
1943
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
1940
 
1944
 
1941
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
1945
	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
1942
	mutex_unlock(&dev_priv->psr.lock);
1946
	mutex_unlock(&dev_priv->psr.lock);
1943
}
1947
}
1944
 
1948
 
1945
void intel_edp_psr_flush(struct drm_device *dev,
1949
void intel_edp_psr_flush(struct drm_device *dev,
1946
			 unsigned frontbuffer_bits)
1950
			 unsigned frontbuffer_bits)
1947
{
1951
{
1948
	struct drm_i915_private *dev_priv = dev->dev_private;
1952
	struct drm_i915_private *dev_priv = dev->dev_private;
1949
	struct drm_crtc *crtc;
1953
	struct drm_crtc *crtc;
1950
	enum pipe pipe;
1954
	enum pipe pipe;
1951
 
1955
 
1952
	mutex_lock(&dev_priv->psr.lock);
1956
	mutex_lock(&dev_priv->psr.lock);
1953
	if (!dev_priv->psr.enabled) {
1957
	if (!dev_priv->psr.enabled) {
1954
		mutex_unlock(&dev_priv->psr.lock);
1958
		mutex_unlock(&dev_priv->psr.lock);
1955
		return;
1959
		return;
1956
		}
1960
		}
1957
 
1961
 
1958
	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
1962
	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
1959
	pipe = to_intel_crtc(crtc)->pipe;
1963
	pipe = to_intel_crtc(crtc)->pipe;
1960
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
1964
	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
1961
 
1965
 
1962
	/*
1966
	/*
1963
	 * On Haswell sprite plane updates don't result in a psr invalidating
1967
	 * On Haswell sprite plane updates don't result in a psr invalidating
1964
	 * signal in the hardware. Which means we need to manually fake this in
1968
	 * signal in the hardware. Which means we need to manually fake this in
1965
	 * software for all flushes, not just when we've seen a preceding
1969
	 * software for all flushes, not just when we've seen a preceding
1966
	 * invalidation through frontbuffer rendering.
1970
	 * invalidation through frontbuffer rendering.
1967
	 */
1971
	 */
1968
	if (IS_HASWELL(dev) &&
1972
	if (IS_HASWELL(dev) &&
1969
	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
1973
	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
1970
		intel_edp_psr_do_exit(dev);
1974
		intel_edp_psr_do_exit(dev);
1971
 
1975
 
1972
	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
1976
	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
1973
		schedule_delayed_work(&dev_priv->psr.work,
1977
		schedule_delayed_work(&dev_priv->psr.work,
1974
				      msecs_to_jiffies(100));
1978
				      msecs_to_jiffies(100));
1975
	mutex_unlock(&dev_priv->psr.lock);
1979
	mutex_unlock(&dev_priv->psr.lock);
1976
}
1980
}
1977
 
1981
 
1978
void intel_edp_psr_init(struct drm_device *dev)
1982
void intel_edp_psr_init(struct drm_device *dev)
1979
{
1983
{
1980
	struct drm_i915_private *dev_priv = dev->dev_private;
1984
	struct drm_i915_private *dev_priv = dev->dev_private;
1981
 
1985
 
1982
	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
1986
	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
1983
	mutex_init(&dev_priv->psr.lock);
1987
	mutex_init(&dev_priv->psr.lock);
1984
}
1988
}
1985
 
1989
 
1986
static void intel_disable_dp(struct intel_encoder *encoder)
1990
static void intel_disable_dp(struct intel_encoder *encoder)
1987
{
1991
{
1988
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1992
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1989
	enum port port = dp_to_dig_port(intel_dp)->port;
1993
	enum port port = dp_to_dig_port(intel_dp)->port;
1990
	struct drm_device *dev = encoder->base.dev;
1994
	struct drm_device *dev = encoder->base.dev;
1991
 
1995
 
1992
	/* Make sure the panel is off before trying to change the mode. But also
1996
	/* Make sure the panel is off before trying to change the mode. But also
1993
	 * ensure that we have vdd while we switch off the panel. */
1997
	 * ensure that we have vdd while we switch off the panel. */
1994
	intel_edp_panel_vdd_on(intel_dp);
1998
	intel_edp_panel_vdd_on(intel_dp);
1995
	intel_edp_backlight_off(intel_dp);
1999
	intel_edp_backlight_off(intel_dp);
1996
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2000
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1997
	intel_edp_panel_off(intel_dp);
2001
	intel_edp_panel_off(intel_dp);
1998
 
2002
 
1999
	/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
2003
	/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
2000
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
2004
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
2001
		intel_dp_link_down(intel_dp);
2005
		intel_dp_link_down(intel_dp);
2002
}
2006
}
2003
 
2007
 
2004
static void g4x_post_disable_dp(struct intel_encoder *encoder)
2008
static void g4x_post_disable_dp(struct intel_encoder *encoder)
2005
{
2009
{
2006
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2010
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2007
	enum port port = dp_to_dig_port(intel_dp)->port;
2011
	enum port port = dp_to_dig_port(intel_dp)->port;
2008
 
2012
 
2009
	if (port != PORT_A)
2013
	if (port != PORT_A)
2010
		return;
2014
		return;
2011
 
2015
 
2012
	intel_dp_link_down(intel_dp);
2016
	intel_dp_link_down(intel_dp);
2013
	ironlake_edp_pll_off(intel_dp);
2017
	ironlake_edp_pll_off(intel_dp);
2014
}
2018
}
2015
 
2019
 
2016
static void vlv_post_disable_dp(struct intel_encoder *encoder)
2020
static void vlv_post_disable_dp(struct intel_encoder *encoder)
2017
{
2021
{
2018
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2022
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2019
 
2023
 
2020
	intel_dp_link_down(intel_dp);
2024
	intel_dp_link_down(intel_dp);
2021
}
2025
}
2022
 
2026
 
2023
static void chv_post_disable_dp(struct intel_encoder *encoder)
2027
static void chv_post_disable_dp(struct intel_encoder *encoder)
2024
{
2028
{
2025
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2029
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2026
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2030
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2027
	struct drm_device *dev = encoder->base.dev;
2031
	struct drm_device *dev = encoder->base.dev;
2028
	struct drm_i915_private *dev_priv = dev->dev_private;
2032
	struct drm_i915_private *dev_priv = dev->dev_private;
2029
	struct intel_crtc *intel_crtc =
2033
	struct intel_crtc *intel_crtc =
2030
		to_intel_crtc(encoder->base.crtc);
2034
		to_intel_crtc(encoder->base.crtc);
2031
	enum dpio_channel ch = vlv_dport_to_channel(dport);
2035
	enum dpio_channel ch = vlv_dport_to_channel(dport);
2032
	enum pipe pipe = intel_crtc->pipe;
2036
	enum pipe pipe = intel_crtc->pipe;
2033
	u32 val;
2037
	u32 val;
2034
 
2038
 
2035
		intel_dp_link_down(intel_dp);
2039
		intel_dp_link_down(intel_dp);
2036
 
2040
 
2037
	mutex_lock(&dev_priv->dpio_lock);
2041
	mutex_lock(&dev_priv->dpio_lock);
2038
 
2042
 
2039
	/* Propagate soft reset to data lane reset */
2043
	/* Propagate soft reset to data lane reset */
2040
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2044
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2041
	val |= CHV_PCS_REQ_SOFTRESET_EN;
2045
	val |= CHV_PCS_REQ_SOFTRESET_EN;
2042
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2046
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2043
 
2047
 
2044
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2048
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2045
	val |= CHV_PCS_REQ_SOFTRESET_EN;
2049
	val |= CHV_PCS_REQ_SOFTRESET_EN;
2046
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2050
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2047
 
2051
 
2048
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2052
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2049
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2053
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2050
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2054
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2051
 
2055
 
2052
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2056
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2053
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2057
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2054
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2058
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2055
 
2059
 
2056
	mutex_unlock(&dev_priv->dpio_lock);
2060
	mutex_unlock(&dev_priv->dpio_lock);
2057
}
2061
}
2058
 
2062
 
2059
static void intel_enable_dp(struct intel_encoder *encoder)
2063
static void intel_enable_dp(struct intel_encoder *encoder)
2060
{
2064
{
2061
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2065
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2062
	struct drm_device *dev = encoder->base.dev;
2066
	struct drm_device *dev = encoder->base.dev;
2063
	struct drm_i915_private *dev_priv = dev->dev_private;
2067
	struct drm_i915_private *dev_priv = dev->dev_private;
2064
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2068
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2065
 
2069
 
2066
	if (WARN_ON(dp_reg & DP_PORT_EN))
2070
	if (WARN_ON(dp_reg & DP_PORT_EN))
2067
		return;
2071
		return;
2068
 
2072
 
2069
	intel_edp_panel_vdd_on(intel_dp);
2073
	intel_edp_panel_vdd_on(intel_dp);
2070
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2074
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2071
			intel_dp_start_link_train(intel_dp);
2075
			intel_dp_start_link_train(intel_dp);
2072
	intel_edp_panel_on(intel_dp);
2076
	intel_edp_panel_on(intel_dp);
2073
	edp_panel_vdd_off(intel_dp, true);
2077
	edp_panel_vdd_off(intel_dp, true);
2074
			intel_dp_complete_link_train(intel_dp);
2078
			intel_dp_complete_link_train(intel_dp);
2075
	intel_dp_stop_link_train(intel_dp);
2079
	intel_dp_stop_link_train(intel_dp);
2076
}
2080
}
2077
 
2081
 
2078
static void g4x_enable_dp(struct intel_encoder *encoder)
2082
static void g4x_enable_dp(struct intel_encoder *encoder)
2079
{
2083
{
2080
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2084
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2081
 
2085
 
2082
	intel_enable_dp(encoder);
2086
	intel_enable_dp(encoder);
2083
	intel_edp_backlight_on(intel_dp);
2087
	intel_edp_backlight_on(intel_dp);
2084
}
2088
}
2085
 
2089
 
2086
static void vlv_enable_dp(struct intel_encoder *encoder)
2090
static void vlv_enable_dp(struct intel_encoder *encoder)
2087
{
2091
{
2088
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2092
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2089
 
2093
 
2090
	intel_edp_backlight_on(intel_dp);
2094
	intel_edp_backlight_on(intel_dp);
2091
}
2095
}
2092
 
2096
 
2093
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2097
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2094
{
2098
{
2095
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2099
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2096
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2100
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2097
 
2101
 
2098
	intel_dp_prepare(encoder);
2102
	intel_dp_prepare(encoder);
2099
 
2103
 
2100
	/* Only ilk+ has port A */
2104
	/* Only ilk+ has port A */
2101
	if (dport->port == PORT_A) {
2105
	if (dport->port == PORT_A) {
2102
		ironlake_set_pll_cpu_edp(intel_dp);
2106
		ironlake_set_pll_cpu_edp(intel_dp);
2103
		ironlake_edp_pll_on(intel_dp);
2107
		ironlake_edp_pll_on(intel_dp);
2104
	}
2108
	}
2105
}
2109
}
2106
 
2110
 
2107
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2111
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2108
{
2112
{
2109
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2113
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2110
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2114
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2111
	struct drm_device *dev = encoder->base.dev;
2115
	struct drm_device *dev = encoder->base.dev;
2112
	struct drm_i915_private *dev_priv = dev->dev_private;
2116
	struct drm_i915_private *dev_priv = dev->dev_private;
2113
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2117
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2114
	enum dpio_channel port = vlv_dport_to_channel(dport);
2118
	enum dpio_channel port = vlv_dport_to_channel(dport);
2115
		int pipe = intel_crtc->pipe;
2119
		int pipe = intel_crtc->pipe;
2116
	struct edp_power_seq power_seq;
2120
	struct edp_power_seq power_seq;
2117
		u32 val;
2121
		u32 val;
2118
 
2122
 
2119
	mutex_lock(&dev_priv->dpio_lock);
2123
	mutex_lock(&dev_priv->dpio_lock);
2120
 
2124
 
2121
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2125
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2122
		val = 0;
2126
		val = 0;
2123
		if (pipe)
2127
		if (pipe)
2124
			val |= (1<<21);
2128
			val |= (1<<21);
2125
		else
2129
		else
2126
			val &= ~(1<<21);
2130
			val &= ~(1<<21);
2127
		val |= 0x001000c4;
2131
		val |= 0x001000c4;
2128
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2132
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2129
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2133
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2130
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2134
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2131
 
2135
 
2132
	mutex_unlock(&dev_priv->dpio_lock);
2136
	mutex_unlock(&dev_priv->dpio_lock);
2133
 
2137
 
2134
	if (is_edp(intel_dp)) {
2138
	if (is_edp(intel_dp)) {
2135
	/* init power sequencer on this pipe and port */
2139
	/* init power sequencer on this pipe and port */
2136
	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2140
	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2137
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2141
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2138
						      &power_seq);
2142
						      &power_seq);
2139
	}
2143
	}
2140
 
2144
 
2141
	intel_enable_dp(encoder);
2145
	intel_enable_dp(encoder);
2142
 
2146
 
2143
	vlv_wait_port_ready(dev_priv, dport);
2147
	vlv_wait_port_ready(dev_priv, dport);
2144
}
2148
}
2145
 
2149
 
2146
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2150
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2147
{
2151
{
2148
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2152
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2149
	struct drm_device *dev = encoder->base.dev;
2153
	struct drm_device *dev = encoder->base.dev;
2150
	struct drm_i915_private *dev_priv = dev->dev_private;
2154
	struct drm_i915_private *dev_priv = dev->dev_private;
2151
	struct intel_crtc *intel_crtc =
2155
	struct intel_crtc *intel_crtc =
2152
		to_intel_crtc(encoder->base.crtc);
2156
		to_intel_crtc(encoder->base.crtc);
2153
	enum dpio_channel port = vlv_dport_to_channel(dport);
2157
	enum dpio_channel port = vlv_dport_to_channel(dport);
2154
	int pipe = intel_crtc->pipe;
2158
	int pipe = intel_crtc->pipe;
2155
 
2159
 
2156
	intel_dp_prepare(encoder);
2160
	intel_dp_prepare(encoder);
2157
 
2161
 
2158
	/* Program Tx lane resets to default */
2162
	/* Program Tx lane resets to default */
2159
	mutex_lock(&dev_priv->dpio_lock);
2163
	mutex_lock(&dev_priv->dpio_lock);
2160
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2164
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2161
			 DPIO_PCS_TX_LANE2_RESET |
2165
			 DPIO_PCS_TX_LANE2_RESET |
2162
			 DPIO_PCS_TX_LANE1_RESET);
2166
			 DPIO_PCS_TX_LANE1_RESET);
2163
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2167
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2164
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2168
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2165
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2169
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2166
			 (1<
2170
			 (1<
2167
				 DPIO_PCS_CLK_SOFT_RESET);
2171
				 DPIO_PCS_CLK_SOFT_RESET);
2168
 
2172
 
2169
	/* Fix up inter-pair skew failure */
2173
	/* Fix up inter-pair skew failure */
2170
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2174
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2171
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2175
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2172
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2176
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2173
	mutex_unlock(&dev_priv->dpio_lock);
2177
	mutex_unlock(&dev_priv->dpio_lock);
2174
}
2178
}
2175
 
2179
 
2176
static void chv_pre_enable_dp(struct intel_encoder *encoder)
2180
static void chv_pre_enable_dp(struct intel_encoder *encoder)
2177
{
2181
{
2178
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2182
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2179
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2183
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2180
	struct drm_device *dev = encoder->base.dev;
2184
	struct drm_device *dev = encoder->base.dev;
2181
	struct drm_i915_private *dev_priv = dev->dev_private;
2185
	struct drm_i915_private *dev_priv = dev->dev_private;
2182
	struct edp_power_seq power_seq;
2186
	struct edp_power_seq power_seq;
2183
	struct intel_crtc *intel_crtc =
2187
	struct intel_crtc *intel_crtc =
2184
		to_intel_crtc(encoder->base.crtc);
2188
		to_intel_crtc(encoder->base.crtc);
2185
	enum dpio_channel ch = vlv_dport_to_channel(dport);
2189
	enum dpio_channel ch = vlv_dport_to_channel(dport);
2186
	int pipe = intel_crtc->pipe;
2190
	int pipe = intel_crtc->pipe;
2187
	int data, i;
2191
	int data, i;
2188
	u32 val;
2192
	u32 val;
2189
 
2193
 
2190
	mutex_lock(&dev_priv->dpio_lock);
2194
	mutex_lock(&dev_priv->dpio_lock);
2191
 
2195
 
2192
	/* Deassert soft data lane reset*/
2196
	/* Deassert soft data lane reset*/
2193
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2197
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2194
	val |= CHV_PCS_REQ_SOFTRESET_EN;
2198
	val |= CHV_PCS_REQ_SOFTRESET_EN;
2195
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2199
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2196
 
2200
 
2197
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2201
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2198
	val |= CHV_PCS_REQ_SOFTRESET_EN;
2202
	val |= CHV_PCS_REQ_SOFTRESET_EN;
2199
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2203
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2200
 
2204
 
2201
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2205
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2202
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2206
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2203
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2207
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2204
 
2208
 
2205
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2209
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2206
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2210
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2207
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2211
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2208
 
2212
 
2209
	/* Program Tx lane latency optimal setting*/
2213
	/* Program Tx lane latency optimal setting*/
2210
	for (i = 0; i < 4; i++) {
2214
	for (i = 0; i < 4; i++) {
2211
		/* Set the latency optimal bit */
2215
		/* Set the latency optimal bit */
2212
		data = (i == 1) ? 0x0 : 0x6;
2216
		data = (i == 1) ? 0x0 : 0x6;
2213
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2217
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2214
				data << DPIO_FRC_LATENCY_SHFIT);
2218
				data << DPIO_FRC_LATENCY_SHFIT);
2215
 
2219
 
2216
		/* Set the upar bit */
2220
		/* Set the upar bit */
2217
		data = (i == 1) ? 0x0 : 0x1;
2221
		data = (i == 1) ? 0x0 : 0x1;
2218
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2222
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2219
				data << DPIO_UPAR_SHIFT);
2223
				data << DPIO_UPAR_SHIFT);
2220
	}
2224
	}
2221
 
2225
 
2222
	/* Data lane stagger programming */
2226
	/* Data lane stagger programming */
2223
	/* FIXME: Fix up value only after power analysis */
2227
	/* FIXME: Fix up value only after power analysis */
2224
 
2228
 
2225
	mutex_unlock(&dev_priv->dpio_lock);
2229
	mutex_unlock(&dev_priv->dpio_lock);
2226
 
2230
 
2227
	if (is_edp(intel_dp)) {
2231
	if (is_edp(intel_dp)) {
2228
		/* init power sequencer on this pipe and port */
2232
		/* init power sequencer on this pipe and port */
2229
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2233
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2230
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2234
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2231
							      &power_seq);
2235
							      &power_seq);
2232
	}
2236
	}
2233
 
2237
 
2234
	intel_enable_dp(encoder);
2238
	intel_enable_dp(encoder);
2235
 
2239
 
2236
	vlv_wait_port_ready(dev_priv, dport);
2240
	vlv_wait_port_ready(dev_priv, dport);
2237
}
2241
}
2238
 
2242
 
2239
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2243
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2240
{
2244
{
2241
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2245
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2242
	struct drm_device *dev = encoder->base.dev;
2246
	struct drm_device *dev = encoder->base.dev;
2243
	struct drm_i915_private *dev_priv = dev->dev_private;
2247
	struct drm_i915_private *dev_priv = dev->dev_private;
2244
	struct intel_crtc *intel_crtc =
2248
	struct intel_crtc *intel_crtc =
2245
		to_intel_crtc(encoder->base.crtc);
2249
		to_intel_crtc(encoder->base.crtc);
2246
	enum dpio_channel ch = vlv_dport_to_channel(dport);
2250
	enum dpio_channel ch = vlv_dport_to_channel(dport);
2247
	enum pipe pipe = intel_crtc->pipe;
2251
	enum pipe pipe = intel_crtc->pipe;
2248
	u32 val;
2252
	u32 val;
2249
 
2253
 
2250
	mutex_lock(&dev_priv->dpio_lock);
2254
	mutex_lock(&dev_priv->dpio_lock);
2251
 
2255
 
2252
	/* program left/right clock distribution */
2256
	/* program left/right clock distribution */
2253
	if (pipe != PIPE_B) {
2257
	if (pipe != PIPE_B) {
2254
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2258
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2255
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2259
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2256
		if (ch == DPIO_CH0)
2260
		if (ch == DPIO_CH0)
2257
			val |= CHV_BUFLEFTENA1_FORCE;
2261
			val |= CHV_BUFLEFTENA1_FORCE;
2258
		if (ch == DPIO_CH1)
2262
		if (ch == DPIO_CH1)
2259
			val |= CHV_BUFRIGHTENA1_FORCE;
2263
			val |= CHV_BUFRIGHTENA1_FORCE;
2260
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2264
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2261
	} else {
2265
	} else {
2262
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2266
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2263
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2267
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2264
		if (ch == DPIO_CH0)
2268
		if (ch == DPIO_CH0)
2265
			val |= CHV_BUFLEFTENA2_FORCE;
2269
			val |= CHV_BUFLEFTENA2_FORCE;
2266
		if (ch == DPIO_CH1)
2270
		if (ch == DPIO_CH1)
2267
			val |= CHV_BUFRIGHTENA2_FORCE;
2271
			val |= CHV_BUFRIGHTENA2_FORCE;
2268
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2272
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2269
	}
2273
	}
2270
 
2274
 
2271
	/* program clock channel usage */
2275
	/* program clock channel usage */
2272
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2276
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2273
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2277
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2274
	if (pipe != PIPE_B)
2278
	if (pipe != PIPE_B)
2275
		val &= ~CHV_PCS_USEDCLKCHANNEL;
2279
		val &= ~CHV_PCS_USEDCLKCHANNEL;
2276
	else
2280
	else
2277
		val |= CHV_PCS_USEDCLKCHANNEL;
2281
		val |= CHV_PCS_USEDCLKCHANNEL;
2278
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2282
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2279
 
2283
 
2280
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2284
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2281
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2285
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2282
	if (pipe != PIPE_B)
2286
	if (pipe != PIPE_B)
2283
		val &= ~CHV_PCS_USEDCLKCHANNEL;
2287
		val &= ~CHV_PCS_USEDCLKCHANNEL;
2284
	else
2288
	else
2285
		val |= CHV_PCS_USEDCLKCHANNEL;
2289
		val |= CHV_PCS_USEDCLKCHANNEL;
2286
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2290
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2287
 
2291
 
2288
	/*
2292
	/*
2289
	 * This a a bit weird since generally CL
2293
	 * This a a bit weird since generally CL
2290
	 * matches the pipe, but here we need to
2294
	 * matches the pipe, but here we need to
2291
	 * pick the CL based on the port.
2295
	 * pick the CL based on the port.
2292
	 */
2296
	 */
2293
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2297
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2294
	if (pipe != PIPE_B)
2298
	if (pipe != PIPE_B)
2295
		val &= ~CHV_CMN_USEDCLKCHANNEL;
2299
		val &= ~CHV_CMN_USEDCLKCHANNEL;
2296
	else
2300
	else
2297
		val |= CHV_CMN_USEDCLKCHANNEL;
2301
		val |= CHV_CMN_USEDCLKCHANNEL;
2298
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2302
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2299
 
2303
 
2300
	mutex_unlock(&dev_priv->dpio_lock);
2304
	mutex_unlock(&dev_priv->dpio_lock);
2301
}
2305
}
2302
 
2306
 
2303
/*
2307
/*
2304
 * Native read with retry for link status and receiver capability reads for
2308
 * Native read with retry for link status and receiver capability reads for
2305
 * cases where the sink may still be asleep.
2309
 * cases where the sink may still be asleep.
2306
 *
2310
 *
2307
 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2311
 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2308
 * supposed to retry 3 times per the spec.
2312
 * supposed to retry 3 times per the spec.
2309
 */
2313
 */
2310
static ssize_t
2314
static ssize_t
2311
intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2315
intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2312
			void *buffer, size_t size)
2316
			void *buffer, size_t size)
2313
{
2317
{
2314
	ssize_t ret;
2318
	ssize_t ret;
2315
	int i;
2319
	int i;
2316
 
2320
 
2317
	for (i = 0; i < 3; i++) {
2321
	for (i = 0; i < 3; i++) {
2318
		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2322
		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2319
		if (ret == size)
2323
		if (ret == size)
2320
			return ret;
2324
			return ret;
2321
		msleep(1);
2325
		msleep(1);
2322
	}
2326
	}
2323
 
2327
 
2324
	return ret;
2328
	return ret;
2325
}
2329
}
2326
 
2330
 
2327
/*
2331
/*
2328
 * Fetch AUX CH registers 0x202 - 0x207 which contain
2332
 * Fetch AUX CH registers 0x202 - 0x207 which contain
2329
 * link status information
2333
 * link status information
2330
 */
2334
 */
2331
static bool
2335
static bool
2332
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2336
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
2333
{
2337
{
2334
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
2338
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
2335
					      DP_LANE0_1_STATUS,
2339
					      DP_LANE0_1_STATUS,
2336
					      link_status,
2340
					      link_status,
2337
				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2341
				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
2338
}
2342
}
2339
 
2343
 
2340
/* These are source-specific values. */
2344
/* These are source-specific values. */
2341
static uint8_t
2345
static uint8_t
2342
intel_dp_voltage_max(struct intel_dp *intel_dp)
2346
intel_dp_voltage_max(struct intel_dp *intel_dp)
2343
{
2347
{
2344
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2348
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2345
	enum port port = dp_to_dig_port(intel_dp)->port;
2349
	enum port port = dp_to_dig_port(intel_dp)->port;
2346
 
2350
 
2347
	if (IS_VALLEYVIEW(dev))
2351
	if (IS_VALLEYVIEW(dev))
2348
		return DP_TRAIN_VOLTAGE_SWING_1200;
2352
		return DP_TRAIN_VOLTAGE_SWING_1200;
2349
	else if (IS_GEN7(dev) && port == PORT_A)
2353
	else if (IS_GEN7(dev) && port == PORT_A)
2350
		return DP_TRAIN_VOLTAGE_SWING_800;
2354
		return DP_TRAIN_VOLTAGE_SWING_800;
2351
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
2355
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
2352
		return DP_TRAIN_VOLTAGE_SWING_1200;
2356
		return DP_TRAIN_VOLTAGE_SWING_1200;
2353
	else
2357
	else
2354
		return DP_TRAIN_VOLTAGE_SWING_800;
2358
		return DP_TRAIN_VOLTAGE_SWING_800;
2355
}
2359
}
2356
 
2360
 
2357
static uint8_t
2361
static uint8_t
2358
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2362
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2359
{
2363
{
2360
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2364
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2361
	enum port port = dp_to_dig_port(intel_dp)->port;
2365
	enum port port = dp_to_dig_port(intel_dp)->port;
2362
 
2366
 
2363
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2367
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2364
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2368
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2365
		case DP_TRAIN_VOLTAGE_SWING_400:
2369
		case DP_TRAIN_VOLTAGE_SWING_400:
2366
			return DP_TRAIN_PRE_EMPHASIS_9_5;
2370
			return DP_TRAIN_PRE_EMPHASIS_9_5;
2367
		case DP_TRAIN_VOLTAGE_SWING_600:
2371
		case DP_TRAIN_VOLTAGE_SWING_600:
2368
			return DP_TRAIN_PRE_EMPHASIS_6;
2372
			return DP_TRAIN_PRE_EMPHASIS_6;
2369
		case DP_TRAIN_VOLTAGE_SWING_800:
2373
		case DP_TRAIN_VOLTAGE_SWING_800:
2370
			return DP_TRAIN_PRE_EMPHASIS_3_5;
2374
			return DP_TRAIN_PRE_EMPHASIS_3_5;
2371
		case DP_TRAIN_VOLTAGE_SWING_1200:
2375
		case DP_TRAIN_VOLTAGE_SWING_1200:
2372
		default:
2376
		default:
2373
			return DP_TRAIN_PRE_EMPHASIS_0;
2377
			return DP_TRAIN_PRE_EMPHASIS_0;
2374
		}
2378
		}
2375
	} else if (IS_VALLEYVIEW(dev)) {
2379
	} else if (IS_VALLEYVIEW(dev)) {
2376
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2380
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2377
		case DP_TRAIN_VOLTAGE_SWING_400:
2381
		case DP_TRAIN_VOLTAGE_SWING_400:
2378
			return DP_TRAIN_PRE_EMPHASIS_9_5;
2382
			return DP_TRAIN_PRE_EMPHASIS_9_5;
2379
		case DP_TRAIN_VOLTAGE_SWING_600:
2383
		case DP_TRAIN_VOLTAGE_SWING_600:
2380
			return DP_TRAIN_PRE_EMPHASIS_6;
2384
			return DP_TRAIN_PRE_EMPHASIS_6;
2381
		case DP_TRAIN_VOLTAGE_SWING_800:
2385
		case DP_TRAIN_VOLTAGE_SWING_800:
2382
			return DP_TRAIN_PRE_EMPHASIS_3_5;
2386
			return DP_TRAIN_PRE_EMPHASIS_3_5;
2383
		case DP_TRAIN_VOLTAGE_SWING_1200:
2387
		case DP_TRAIN_VOLTAGE_SWING_1200:
2384
		default:
2388
		default:
2385
			return DP_TRAIN_PRE_EMPHASIS_0;
2389
			return DP_TRAIN_PRE_EMPHASIS_0;
2386
		}
2390
		}
2387
	} else if (IS_GEN7(dev) && port == PORT_A) {
2391
	} else if (IS_GEN7(dev) && port == PORT_A) {
2388
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2392
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2389
		case DP_TRAIN_VOLTAGE_SWING_400:
2393
		case DP_TRAIN_VOLTAGE_SWING_400:
2390
			return DP_TRAIN_PRE_EMPHASIS_6;
2394
			return DP_TRAIN_PRE_EMPHASIS_6;
2391
		case DP_TRAIN_VOLTAGE_SWING_600:
2395
		case DP_TRAIN_VOLTAGE_SWING_600:
2392
		case DP_TRAIN_VOLTAGE_SWING_800:
2396
		case DP_TRAIN_VOLTAGE_SWING_800:
2393
			return DP_TRAIN_PRE_EMPHASIS_3_5;
2397
			return DP_TRAIN_PRE_EMPHASIS_3_5;
2394
		default:
2398
		default:
2395
			return DP_TRAIN_PRE_EMPHASIS_0;
2399
			return DP_TRAIN_PRE_EMPHASIS_0;
2396
		}
2400
		}
2397
	} else {
2401
	} else {
2398
	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2402
	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2399
	case DP_TRAIN_VOLTAGE_SWING_400:
2403
	case DP_TRAIN_VOLTAGE_SWING_400:
2400
		return DP_TRAIN_PRE_EMPHASIS_6;
2404
		return DP_TRAIN_PRE_EMPHASIS_6;
2401
	case DP_TRAIN_VOLTAGE_SWING_600:
2405
	case DP_TRAIN_VOLTAGE_SWING_600:
2402
		return DP_TRAIN_PRE_EMPHASIS_6;
2406
		return DP_TRAIN_PRE_EMPHASIS_6;
2403
	case DP_TRAIN_VOLTAGE_SWING_800:
2407
	case DP_TRAIN_VOLTAGE_SWING_800:
2404
		return DP_TRAIN_PRE_EMPHASIS_3_5;
2408
		return DP_TRAIN_PRE_EMPHASIS_3_5;
2405
	case DP_TRAIN_VOLTAGE_SWING_1200:
2409
	case DP_TRAIN_VOLTAGE_SWING_1200:
2406
	default:
2410
	default:
2407
		return DP_TRAIN_PRE_EMPHASIS_0;
2411
		return DP_TRAIN_PRE_EMPHASIS_0;
2408
	}
2412
	}
2409
	}
2413
	}
2410
}
2414
}
2411
 
2415
 
2412
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2416
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2413
{
2417
{
2414
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2418
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2415
	struct drm_i915_private *dev_priv = dev->dev_private;
2419
	struct drm_i915_private *dev_priv = dev->dev_private;
2416
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2420
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2417
	struct intel_crtc *intel_crtc =
2421
	struct intel_crtc *intel_crtc =
2418
		to_intel_crtc(dport->base.base.crtc);
2422
		to_intel_crtc(dport->base.base.crtc);
2419
	unsigned long demph_reg_value, preemph_reg_value,
2423
	unsigned long demph_reg_value, preemph_reg_value,
2420
		uniqtranscale_reg_value;
2424
		uniqtranscale_reg_value;
2421
	uint8_t train_set = intel_dp->train_set[0];
2425
	uint8_t train_set = intel_dp->train_set[0];
2422
	enum dpio_channel port = vlv_dport_to_channel(dport);
2426
	enum dpio_channel port = vlv_dport_to_channel(dport);
2423
	int pipe = intel_crtc->pipe;
2427
	int pipe = intel_crtc->pipe;
2424
 
2428
 
2425
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2429
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2426
	case DP_TRAIN_PRE_EMPHASIS_0:
2430
	case DP_TRAIN_PRE_EMPHASIS_0:
2427
		preemph_reg_value = 0x0004000;
2431
		preemph_reg_value = 0x0004000;
2428
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2432
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2429
		case DP_TRAIN_VOLTAGE_SWING_400:
2433
		case DP_TRAIN_VOLTAGE_SWING_400:
2430
			demph_reg_value = 0x2B405555;
2434
			demph_reg_value = 0x2B405555;
2431
			uniqtranscale_reg_value = 0x552AB83A;
2435
			uniqtranscale_reg_value = 0x552AB83A;
2432
			break;
2436
			break;
2433
		case DP_TRAIN_VOLTAGE_SWING_600:
2437
		case DP_TRAIN_VOLTAGE_SWING_600:
2434
			demph_reg_value = 0x2B404040;
2438
			demph_reg_value = 0x2B404040;
2435
			uniqtranscale_reg_value = 0x5548B83A;
2439
			uniqtranscale_reg_value = 0x5548B83A;
2436
			break;
2440
			break;
2437
		case DP_TRAIN_VOLTAGE_SWING_800:
2441
		case DP_TRAIN_VOLTAGE_SWING_800:
2438
			demph_reg_value = 0x2B245555;
2442
			demph_reg_value = 0x2B245555;
2439
			uniqtranscale_reg_value = 0x5560B83A;
2443
			uniqtranscale_reg_value = 0x5560B83A;
2440
			break;
2444
			break;
2441
		case DP_TRAIN_VOLTAGE_SWING_1200:
2445
		case DP_TRAIN_VOLTAGE_SWING_1200:
2442
			demph_reg_value = 0x2B405555;
2446
			demph_reg_value = 0x2B405555;
2443
			uniqtranscale_reg_value = 0x5598DA3A;
2447
			uniqtranscale_reg_value = 0x5598DA3A;
2444
			break;
2448
			break;
2445
		default:
2449
		default:
2446
			return 0;
2450
			return 0;
2447
		}
2451
		}
2448
		break;
2452
		break;
2449
	case DP_TRAIN_PRE_EMPHASIS_3_5:
2453
	case DP_TRAIN_PRE_EMPHASIS_3_5:
2450
		preemph_reg_value = 0x0002000;
2454
		preemph_reg_value = 0x0002000;
2451
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2455
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2452
		case DP_TRAIN_VOLTAGE_SWING_400:
2456
		case DP_TRAIN_VOLTAGE_SWING_400:
2453
			demph_reg_value = 0x2B404040;
2457
			demph_reg_value = 0x2B404040;
2454
			uniqtranscale_reg_value = 0x5552B83A;
2458
			uniqtranscale_reg_value = 0x5552B83A;
2455
			break;
2459
			break;
2456
		case DP_TRAIN_VOLTAGE_SWING_600:
2460
		case DP_TRAIN_VOLTAGE_SWING_600:
2457
			demph_reg_value = 0x2B404848;
2461
			demph_reg_value = 0x2B404848;
2458
			uniqtranscale_reg_value = 0x5580B83A;
2462
			uniqtranscale_reg_value = 0x5580B83A;
2459
			break;
2463
			break;
2460
		case DP_TRAIN_VOLTAGE_SWING_800:
2464
		case DP_TRAIN_VOLTAGE_SWING_800:
2461
			demph_reg_value = 0x2B404040;
2465
			demph_reg_value = 0x2B404040;
2462
			uniqtranscale_reg_value = 0x55ADDA3A;
2466
			uniqtranscale_reg_value = 0x55ADDA3A;
2463
			break;
2467
			break;
2464
		default:
2468
		default:
2465
			return 0;
2469
			return 0;
2466
		}
2470
		}
2467
		break;
2471
		break;
2468
	case DP_TRAIN_PRE_EMPHASIS_6:
2472
	case DP_TRAIN_PRE_EMPHASIS_6:
2469
		preemph_reg_value = 0x0000000;
2473
		preemph_reg_value = 0x0000000;
2470
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2474
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2471
		case DP_TRAIN_VOLTAGE_SWING_400:
2475
		case DP_TRAIN_VOLTAGE_SWING_400:
2472
			demph_reg_value = 0x2B305555;
2476
			demph_reg_value = 0x2B305555;
2473
			uniqtranscale_reg_value = 0x5570B83A;
2477
			uniqtranscale_reg_value = 0x5570B83A;
2474
			break;
2478
			break;
2475
		case DP_TRAIN_VOLTAGE_SWING_600:
2479
		case DP_TRAIN_VOLTAGE_SWING_600:
2476
			demph_reg_value = 0x2B2B4040;
2480
			demph_reg_value = 0x2B2B4040;
2477
			uniqtranscale_reg_value = 0x55ADDA3A;
2481
			uniqtranscale_reg_value = 0x55ADDA3A;
2478
			break;
2482
			break;
2479
		default:
2483
		default:
2480
			return 0;
2484
			return 0;
2481
		}
2485
		}
2482
		break;
2486
		break;
2483
	case DP_TRAIN_PRE_EMPHASIS_9_5:
2487
	case DP_TRAIN_PRE_EMPHASIS_9_5:
2484
		preemph_reg_value = 0x0006000;
2488
		preemph_reg_value = 0x0006000;
2485
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2489
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2486
		case DP_TRAIN_VOLTAGE_SWING_400:
2490
		case DP_TRAIN_VOLTAGE_SWING_400:
2487
			demph_reg_value = 0x1B405555;
2491
			demph_reg_value = 0x1B405555;
2488
			uniqtranscale_reg_value = 0x55ADDA3A;
2492
			uniqtranscale_reg_value = 0x55ADDA3A;
2489
			break;
2493
			break;
2490
		default:
2494
		default:
2491
			return 0;
2495
			return 0;
2492
		}
2496
		}
2493
		break;
2497
		break;
2494
	default:
2498
	default:
2495
		return 0;
2499
		return 0;
2496
	}
2500
	}
2497
 
2501
 
2498
	mutex_lock(&dev_priv->dpio_lock);
2502
	mutex_lock(&dev_priv->dpio_lock);
2499
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2503
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2500
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2504
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2501
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
2505
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
2502
			 uniqtranscale_reg_value);
2506
			 uniqtranscale_reg_value);
2503
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
2507
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
2504
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
2508
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
2505
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
2509
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
2506
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
2510
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
2507
	mutex_unlock(&dev_priv->dpio_lock);
2511
	mutex_unlock(&dev_priv->dpio_lock);
2508
 
2512
 
2509
	return 0;
2513
	return 0;
2510
}
2514
}
2511
 
2515
 
2512
static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2516
static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2513
{
2517
{
2514
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2518
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
2515
	struct drm_i915_private *dev_priv = dev->dev_private;
2519
	struct drm_i915_private *dev_priv = dev->dev_private;
2516
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2520
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2517
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
2521
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
2518
	u32 deemph_reg_value, margin_reg_value, val;
2522
	u32 deemph_reg_value, margin_reg_value, val;
2519
	uint8_t train_set = intel_dp->train_set[0];
2523
	uint8_t train_set = intel_dp->train_set[0];
2520
	enum dpio_channel ch = vlv_dport_to_channel(dport);
2524
	enum dpio_channel ch = vlv_dport_to_channel(dport);
2521
	enum pipe pipe = intel_crtc->pipe;
2525
	enum pipe pipe = intel_crtc->pipe;
2522
	int i;
2526
	int i;
2523
 
2527
 
2524
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2528
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2525
	case DP_TRAIN_PRE_EMPHASIS_0:
2529
	case DP_TRAIN_PRE_EMPHASIS_0:
2526
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2530
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2527
		case DP_TRAIN_VOLTAGE_SWING_400:
2531
		case DP_TRAIN_VOLTAGE_SWING_400:
2528
			deemph_reg_value = 128;
2532
			deemph_reg_value = 128;
2529
			margin_reg_value = 52;
2533
			margin_reg_value = 52;
2530
			break;
2534
			break;
2531
		case DP_TRAIN_VOLTAGE_SWING_600:
2535
		case DP_TRAIN_VOLTAGE_SWING_600:
2532
			deemph_reg_value = 128;
2536
			deemph_reg_value = 128;
2533
			margin_reg_value = 77;
2537
			margin_reg_value = 77;
2534
			break;
2538
			break;
2535
		case DP_TRAIN_VOLTAGE_SWING_800:
2539
		case DP_TRAIN_VOLTAGE_SWING_800:
2536
			deemph_reg_value = 128;
2540
			deemph_reg_value = 128;
2537
			margin_reg_value = 102;
2541
			margin_reg_value = 102;
2538
			break;
2542
			break;
2539
		case DP_TRAIN_VOLTAGE_SWING_1200:
2543
		case DP_TRAIN_VOLTAGE_SWING_1200:
2540
			deemph_reg_value = 128;
2544
			deemph_reg_value = 128;
2541
			margin_reg_value = 154;
2545
			margin_reg_value = 154;
2542
			/* FIXME extra to set for 1200 */
2546
			/* FIXME extra to set for 1200 */
2543
			break;
2547
			break;
2544
		default:
2548
		default:
2545
			return 0;
2549
			return 0;
2546
		}
2550
		}
2547
		break;
2551
		break;
2548
	case DP_TRAIN_PRE_EMPHASIS_3_5:
2552
	case DP_TRAIN_PRE_EMPHASIS_3_5:
2549
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2553
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2550
		case DP_TRAIN_VOLTAGE_SWING_400:
2554
		case DP_TRAIN_VOLTAGE_SWING_400:
2551
			deemph_reg_value = 85;
2555
			deemph_reg_value = 85;
2552
			margin_reg_value = 78;
2556
			margin_reg_value = 78;
2553
			break;
2557
			break;
2554
		case DP_TRAIN_VOLTAGE_SWING_600:
2558
		case DP_TRAIN_VOLTAGE_SWING_600:
2555
			deemph_reg_value = 85;
2559
			deemph_reg_value = 85;
2556
			margin_reg_value = 116;
2560
			margin_reg_value = 116;
2557
			break;
2561
			break;
2558
		case DP_TRAIN_VOLTAGE_SWING_800:
2562
		case DP_TRAIN_VOLTAGE_SWING_800:
2559
			deemph_reg_value = 85;
2563
			deemph_reg_value = 85;
2560
			margin_reg_value = 154;
2564
			margin_reg_value = 154;
2561
			break;
2565
			break;
2562
		default:
2566
		default:
2563
			return 0;
2567
			return 0;
2564
		}
2568
		}
2565
		break;
2569
		break;
2566
	case DP_TRAIN_PRE_EMPHASIS_6:
2570
	case DP_TRAIN_PRE_EMPHASIS_6:
2567
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2571
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2568
		case DP_TRAIN_VOLTAGE_SWING_400:
2572
		case DP_TRAIN_VOLTAGE_SWING_400:
2569
			deemph_reg_value = 64;
2573
			deemph_reg_value = 64;
2570
			margin_reg_value = 104;
2574
			margin_reg_value = 104;
2571
			break;
2575
			break;
2572
		case DP_TRAIN_VOLTAGE_SWING_600:
2576
		case DP_TRAIN_VOLTAGE_SWING_600:
2573
			deemph_reg_value = 64;
2577
			deemph_reg_value = 64;
2574
			margin_reg_value = 154;
2578
			margin_reg_value = 154;
2575
			break;
2579
			break;
2576
		default:
2580
		default:
2577
			return 0;
2581
			return 0;
2578
		}
2582
		}
2579
		break;
2583
		break;
2580
	case DP_TRAIN_PRE_EMPHASIS_9_5:
2584
	case DP_TRAIN_PRE_EMPHASIS_9_5:
2581
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2585
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2582
		case DP_TRAIN_VOLTAGE_SWING_400:
2586
		case DP_TRAIN_VOLTAGE_SWING_400:
2583
			deemph_reg_value = 43;
2587
			deemph_reg_value = 43;
2584
			margin_reg_value = 154;
2588
			margin_reg_value = 154;
2585
			break;
2589
			break;
2586
		default:
2590
		default:
2587
			return 0;
2591
			return 0;
2588
		}
2592
		}
2589
		break;
2593
		break;
2590
	default:
2594
	default:
2591
		return 0;
2595
		return 0;
2592
	}
2596
	}
2593
 
2597
 
2594
	mutex_lock(&dev_priv->dpio_lock);
2598
	mutex_lock(&dev_priv->dpio_lock);
2595
 
2599
 
2596
	/* Clear calc init */
2600
	/* Clear calc init */
2597
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2601
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2598
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2602
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2599
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2603
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2600
 
2604
 
2601
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2605
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2602
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2606
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2603
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2607
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2604
 
2608
 
2605
	/* Program swing deemph */
2609
	/* Program swing deemph */
2606
	for (i = 0; i < 4; i++) {
2610
	for (i = 0; i < 4; i++) {
2607
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
2611
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
2608
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
2612
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
2609
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
2613
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
2610
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
2614
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
2611
	}
2615
	}
2612
 
2616
 
2613
	/* Program swing margin */
2617
	/* Program swing margin */
2614
	for (i = 0; i < 4; i++) {
2618
	for (i = 0; i < 4; i++) {
2615
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2619
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2616
		val &= ~DPIO_SWING_MARGIN_MASK;
2620
		val &= ~DPIO_SWING_MARGIN_MASK;
2617
		val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
2621
		val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
2618
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2622
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2619
	}
2623
	}
2620
 
2624
 
2621
	/* Disable unique transition scale */
2625
	/* Disable unique transition scale */
2622
	for (i = 0; i < 4; i++) {
2626
	for (i = 0; i < 4; i++) {
2623
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2627
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2624
		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
2628
		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
2625
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2629
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2626
	}
2630
	}
2627
 
2631
 
2628
	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
2632
	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
2629
			== DP_TRAIN_PRE_EMPHASIS_0) &&
2633
			== DP_TRAIN_PRE_EMPHASIS_0) &&
2630
		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
2634
		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
2631
			== DP_TRAIN_VOLTAGE_SWING_1200)) {
2635
			== DP_TRAIN_VOLTAGE_SWING_1200)) {
2632
 
2636
 
2633
		/*
2637
		/*
2634
		 * The document said it needs to set bit 27 for ch0 and bit 26
2638
		 * The document said it needs to set bit 27 for ch0 and bit 26
2635
		 * for ch1. Might be a typo in the doc.
2639
		 * for ch1. Might be a typo in the doc.
2636
		 * For now, for this unique transition scale selection, set bit
2640
		 * For now, for this unique transition scale selection, set bit
2637
		 * 27 for ch0 and ch1.
2641
		 * 27 for ch0 and ch1.
2638
		 */
2642
		 */
2639
		for (i = 0; i < 4; i++) {
2643
		for (i = 0; i < 4; i++) {
2640
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2644
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2641
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
2645
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
2642
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2646
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2643
		}
2647
		}
2644
 
2648
 
2645
		for (i = 0; i < 4; i++) {
2649
		for (i = 0; i < 4; i++) {
2646
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2650
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2647
			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2651
			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2648
			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2652
			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2649
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2653
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2650
		}
2654
		}
2651
	}
2655
	}
2652
 
2656
 
2653
	/* Start swing calculation */
2657
	/* Start swing calculation */
2654
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2658
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2655
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2659
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2656
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2660
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2657
 
2661
 
2658
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2662
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2659
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2663
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2660
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2664
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2661
 
2665
 
2662
	/* LRC Bypass */
2666
	/* LRC Bypass */
2663
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
2667
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
2664
	val |= DPIO_LRC_BYPASS;
2668
	val |= DPIO_LRC_BYPASS;
2665
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
2669
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
2666
 
2670
 
2667
	mutex_unlock(&dev_priv->dpio_lock);
2671
	mutex_unlock(&dev_priv->dpio_lock);
2668
 
2672
 
2669
	return 0;
2673
	return 0;
2670
}
2674
}
2671
 
2675
 
2672
static void
2676
static void
2673
intel_get_adjust_train(struct intel_dp *intel_dp,
2677
intel_get_adjust_train(struct intel_dp *intel_dp,
2674
		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
2678
		       const uint8_t link_status[DP_LINK_STATUS_SIZE])
2675
{
2679
{
2676
	uint8_t v = 0;
2680
	uint8_t v = 0;
2677
	uint8_t p = 0;
2681
	uint8_t p = 0;
2678
	int lane;
2682
	int lane;
2679
	uint8_t voltage_max;
2683
	uint8_t voltage_max;
2680
	uint8_t preemph_max;
2684
	uint8_t preemph_max;
2681
 
2685
 
2682
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
2686
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
2683
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
2687
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
2684
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
2688
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
2685
 
2689
 
2686
		if (this_v > v)
2690
		if (this_v > v)
2687
			v = this_v;
2691
			v = this_v;
2688
		if (this_p > p)
2692
		if (this_p > p)
2689
			p = this_p;
2693
			p = this_p;
2690
	}
2694
	}
2691
 
2695
 
2692
	voltage_max = intel_dp_voltage_max(intel_dp);
2696
	voltage_max = intel_dp_voltage_max(intel_dp);
2693
	if (v >= voltage_max)
2697
	if (v >= voltage_max)
2694
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2698
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
2695
 
2699
 
2696
	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
2700
	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
2697
	if (p >= preemph_max)
2701
	if (p >= preemph_max)
2698
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2702
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
2699
 
2703
 
2700
	for (lane = 0; lane < 4; lane++)
2704
	for (lane = 0; lane < 4; lane++)
2701
		intel_dp->train_set[lane] = v | p;
2705
		intel_dp->train_set[lane] = v | p;
2702
}
2706
}
2703
 
2707
 
2704
static uint32_t
2708
static uint32_t
2705
intel_gen4_signal_levels(uint8_t train_set)
2709
intel_gen4_signal_levels(uint8_t train_set)
2706
{
2710
{
2707
	uint32_t	signal_levels = 0;
2711
	uint32_t	signal_levels = 0;
2708
 
2712
 
2709
	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2713
	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2710
	case DP_TRAIN_VOLTAGE_SWING_400:
2714
	case DP_TRAIN_VOLTAGE_SWING_400:
2711
	default:
2715
	default:
2712
		signal_levels |= DP_VOLTAGE_0_4;
2716
		signal_levels |= DP_VOLTAGE_0_4;
2713
		break;
2717
		break;
2714
	case DP_TRAIN_VOLTAGE_SWING_600:
2718
	case DP_TRAIN_VOLTAGE_SWING_600:
2715
		signal_levels |= DP_VOLTAGE_0_6;
2719
		signal_levels |= DP_VOLTAGE_0_6;
2716
		break;
2720
		break;
2717
	case DP_TRAIN_VOLTAGE_SWING_800:
2721
	case DP_TRAIN_VOLTAGE_SWING_800:
2718
		signal_levels |= DP_VOLTAGE_0_8;
2722
		signal_levels |= DP_VOLTAGE_0_8;
2719
		break;
2723
		break;
2720
	case DP_TRAIN_VOLTAGE_SWING_1200:
2724
	case DP_TRAIN_VOLTAGE_SWING_1200:
2721
		signal_levels |= DP_VOLTAGE_1_2;
2725
		signal_levels |= DP_VOLTAGE_1_2;
2722
		break;
2726
		break;
2723
	}
2727
	}
2724
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2728
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2725
	case DP_TRAIN_PRE_EMPHASIS_0:
2729
	case DP_TRAIN_PRE_EMPHASIS_0:
2726
	default:
2730
	default:
2727
		signal_levels |= DP_PRE_EMPHASIS_0;
2731
		signal_levels |= DP_PRE_EMPHASIS_0;
2728
		break;
2732
		break;
2729
	case DP_TRAIN_PRE_EMPHASIS_3_5:
2733
	case DP_TRAIN_PRE_EMPHASIS_3_5:
2730
		signal_levels |= DP_PRE_EMPHASIS_3_5;
2734
		signal_levels |= DP_PRE_EMPHASIS_3_5;
2731
		break;
2735
		break;
2732
	case DP_TRAIN_PRE_EMPHASIS_6:
2736
	case DP_TRAIN_PRE_EMPHASIS_6:
2733
		signal_levels |= DP_PRE_EMPHASIS_6;
2737
		signal_levels |= DP_PRE_EMPHASIS_6;
2734
		break;
2738
		break;
2735
	case DP_TRAIN_PRE_EMPHASIS_9_5:
2739
	case DP_TRAIN_PRE_EMPHASIS_9_5:
2736
		signal_levels |= DP_PRE_EMPHASIS_9_5;
2740
		signal_levels |= DP_PRE_EMPHASIS_9_5;
2737
		break;
2741
		break;
2738
	}
2742
	}
2739
	return signal_levels;
2743
	return signal_levels;
2740
}
2744
}
2741
 
2745
 
2742
/* Gen6's DP voltage swing and pre-emphasis control */
2746
/* Gen6's DP voltage swing and pre-emphasis control */
2743
static uint32_t
2747
static uint32_t
2744
intel_gen6_edp_signal_levels(uint8_t train_set)
2748
intel_gen6_edp_signal_levels(uint8_t train_set)
2745
{
2749
{
2746
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2750
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2747
					 DP_TRAIN_PRE_EMPHASIS_MASK);
2751
					 DP_TRAIN_PRE_EMPHASIS_MASK);
2748
	switch (signal_levels) {
2752
	switch (signal_levels) {
2749
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2753
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2750
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2754
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2751
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2755
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2752
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2756
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2753
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
2757
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
2754
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2758
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2755
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2759
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2756
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
2760
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
2757
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2761
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2758
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2762
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2759
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
2763
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
2760
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2764
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2761
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2765
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2762
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
2766
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
2763
	default:
2767
	default:
2764
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2768
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2765
			      "0x%x\n", signal_levels);
2769
			      "0x%x\n", signal_levels);
2766
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2770
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2767
	}
2771
	}
2768
}
2772
}
2769
 
2773
 
2770
/* Gen7's DP voltage swing and pre-emphasis control */
2774
/* Gen7's DP voltage swing and pre-emphasis control */
2771
static uint32_t
2775
static uint32_t
2772
intel_gen7_edp_signal_levels(uint8_t train_set)
2776
intel_gen7_edp_signal_levels(uint8_t train_set)
2773
{
2777
{
2774
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2778
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2775
					 DP_TRAIN_PRE_EMPHASIS_MASK);
2779
					 DP_TRAIN_PRE_EMPHASIS_MASK);
2776
	switch (signal_levels) {
2780
	switch (signal_levels) {
2777
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2781
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2778
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
2782
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
2779
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2783
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2780
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
2784
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
2781
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2785
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2782
		return EDP_LINK_TRAIN_400MV_6DB_IVB;
2786
		return EDP_LINK_TRAIN_400MV_6DB_IVB;
2783
 
2787
 
2784
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2788
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2785
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
2789
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
2786
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2790
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2787
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
2791
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
2788
 
2792
 
2789
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2793
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2790
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
2794
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
2791
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2795
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2792
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
2796
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
2793
 
2797
 
2794
	default:
2798
	default:
2795
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2799
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2796
			      "0x%x\n", signal_levels);
2800
			      "0x%x\n", signal_levels);
2797
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
2801
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
2798
	}
2802
	}
2799
}
2803
}
2800
 
2804
 
2801
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
2805
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
2802
static uint32_t
2806
static uint32_t
2803
intel_hsw_signal_levels(uint8_t train_set)
2807
intel_hsw_signal_levels(uint8_t train_set)
2804
{
2808
{
2805
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2809
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2806
					 DP_TRAIN_PRE_EMPHASIS_MASK);
2810
					 DP_TRAIN_PRE_EMPHASIS_MASK);
2807
	switch (signal_levels) {
2811
	switch (signal_levels) {
2808
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2812
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2809
		return DDI_BUF_EMP_400MV_0DB_HSW;
2813
		return DDI_BUF_EMP_400MV_0DB_HSW;
2810
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2814
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2811
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
2815
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
2812
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2816
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2813
		return DDI_BUF_EMP_400MV_6DB_HSW;
2817
		return DDI_BUF_EMP_400MV_6DB_HSW;
2814
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
2818
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
2815
		return DDI_BUF_EMP_400MV_9_5DB_HSW;
2819
		return DDI_BUF_EMP_400MV_9_5DB_HSW;
2816
 
2820
 
2817
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2821
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2818
		return DDI_BUF_EMP_600MV_0DB_HSW;
2822
		return DDI_BUF_EMP_600MV_0DB_HSW;
2819
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2823
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2820
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
2824
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
2821
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2825
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2822
		return DDI_BUF_EMP_600MV_6DB_HSW;
2826
		return DDI_BUF_EMP_600MV_6DB_HSW;
2823
 
2827
 
2824
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2828
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2825
		return DDI_BUF_EMP_800MV_0DB_HSW;
2829
		return DDI_BUF_EMP_800MV_0DB_HSW;
2826
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2830
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2827
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
2831
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
2828
	default:
2832
	default:
2829
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2833
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2830
			      "0x%x\n", signal_levels);
2834
			      "0x%x\n", signal_levels);
2831
		return DDI_BUF_EMP_400MV_0DB_HSW;
2835
		return DDI_BUF_EMP_400MV_0DB_HSW;
2832
	}
2836
	}
2833
}
2837
}
2834
 
2838
 
2835
/* Properly updates "DP" with the correct signal levels. */
2839
/* Properly updates "DP" with the correct signal levels. */
2836
static void
2840
static void
2837
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2841
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2838
{
2842
{
2839
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2843
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2840
	enum port port = intel_dig_port->port;
2844
	enum port port = intel_dig_port->port;
2841
	struct drm_device *dev = intel_dig_port->base.base.dev;
2845
	struct drm_device *dev = intel_dig_port->base.base.dev;
2842
	uint32_t signal_levels, mask;
2846
	uint32_t signal_levels, mask;
2843
	uint8_t train_set = intel_dp->train_set[0];
2847
	uint8_t train_set = intel_dp->train_set[0];
2844
 
2848
 
2845
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2849
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2846
		signal_levels = intel_hsw_signal_levels(train_set);
2850
		signal_levels = intel_hsw_signal_levels(train_set);
2847
		mask = DDI_BUF_EMP_MASK;
2851
		mask = DDI_BUF_EMP_MASK;
2848
	} else if (IS_CHERRYVIEW(dev)) {
2852
	} else if (IS_CHERRYVIEW(dev)) {
2849
		signal_levels = intel_chv_signal_levels(intel_dp);
2853
		signal_levels = intel_chv_signal_levels(intel_dp);
2850
		mask = 0;
2854
		mask = 0;
2851
	} else if (IS_VALLEYVIEW(dev)) {
2855
	} else if (IS_VALLEYVIEW(dev)) {
2852
		signal_levels = intel_vlv_signal_levels(intel_dp);
2856
		signal_levels = intel_vlv_signal_levels(intel_dp);
2853
		mask = 0;
2857
		mask = 0;
2854
	} else if (IS_GEN7(dev) && port == PORT_A) {
2858
	} else if (IS_GEN7(dev) && port == PORT_A) {
2855
		signal_levels = intel_gen7_edp_signal_levels(train_set);
2859
		signal_levels = intel_gen7_edp_signal_levels(train_set);
2856
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
2860
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
2857
	} else if (IS_GEN6(dev) && port == PORT_A) {
2861
	} else if (IS_GEN6(dev) && port == PORT_A) {
2858
		signal_levels = intel_gen6_edp_signal_levels(train_set);
2862
		signal_levels = intel_gen6_edp_signal_levels(train_set);
2859
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
2863
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
2860
	} else {
2864
	} else {
2861
		signal_levels = intel_gen4_signal_levels(train_set);
2865
		signal_levels = intel_gen4_signal_levels(train_set);
2862
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
2866
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
2863
	}
2867
	}
2864
 
2868
 
2865
	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
2869
	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
2866
 
2870
 
2867
	*DP = (*DP & ~mask) | signal_levels;
2871
	*DP = (*DP & ~mask) | signal_levels;
2868
}
2872
}
2869
 
2873
 
2870
static bool
2874
static bool
2871
intel_dp_set_link_train(struct intel_dp *intel_dp,
2875
intel_dp_set_link_train(struct intel_dp *intel_dp,
2872
			uint32_t *DP,
2876
			uint32_t *DP,
2873
			uint8_t dp_train_pat)
2877
			uint8_t dp_train_pat)
2874
{
2878
{
2875
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2879
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2876
	struct drm_device *dev = intel_dig_port->base.base.dev;
2880
	struct drm_device *dev = intel_dig_port->base.base.dev;
2877
	struct drm_i915_private *dev_priv = dev->dev_private;
2881
	struct drm_i915_private *dev_priv = dev->dev_private;
2878
	enum port port = intel_dig_port->port;
2882
	enum port port = intel_dig_port->port;
2879
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
2883
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
2880
	int ret, len;
2884
	int ret, len;
2881
 
2885
 
2882
	if (HAS_DDI(dev)) {
2886
	if (HAS_DDI(dev)) {
2883
		uint32_t temp = I915_READ(DP_TP_CTL(port));
2887
		uint32_t temp = I915_READ(DP_TP_CTL(port));
2884
 
2888
 
2885
		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2889
		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2886
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2890
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2887
		else
2891
		else
2888
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2892
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2889
 
2893
 
2890
		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2894
		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2891
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2895
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2892
		case DP_TRAINING_PATTERN_DISABLE:
2896
		case DP_TRAINING_PATTERN_DISABLE:
2893
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2897
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2894
 
2898
 
2895
			break;
2899
			break;
2896
		case DP_TRAINING_PATTERN_1:
2900
		case DP_TRAINING_PATTERN_1:
2897
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2901
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2898
			break;
2902
			break;
2899
		case DP_TRAINING_PATTERN_2:
2903
		case DP_TRAINING_PATTERN_2:
2900
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2904
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2901
			break;
2905
			break;
2902
		case DP_TRAINING_PATTERN_3:
2906
		case DP_TRAINING_PATTERN_3:
2903
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2907
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2904
			break;
2908
			break;
2905
		}
2909
		}
2906
		I915_WRITE(DP_TP_CTL(port), temp);
2910
		I915_WRITE(DP_TP_CTL(port), temp);
2907
 
2911
 
2908
	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2912
	} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2909
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2913
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;
2910
 
2914
 
2911
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2915
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2912
		case DP_TRAINING_PATTERN_DISABLE:
2916
		case DP_TRAINING_PATTERN_DISABLE:
2913
			*DP |= DP_LINK_TRAIN_OFF_CPT;
2917
			*DP |= DP_LINK_TRAIN_OFF_CPT;
2914
			break;
2918
			break;
2915
		case DP_TRAINING_PATTERN_1:
2919
		case DP_TRAINING_PATTERN_1:
2916
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2920
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
2917
			break;
2921
			break;
2918
		case DP_TRAINING_PATTERN_2:
2922
		case DP_TRAINING_PATTERN_2:
2919
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2923
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2920
			break;
2924
			break;
2921
		case DP_TRAINING_PATTERN_3:
2925
		case DP_TRAINING_PATTERN_3:
2922
			DRM_ERROR("DP training pattern 3 not supported\n");
2926
			DRM_ERROR("DP training pattern 3 not supported\n");
2923
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2927
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
2924
			break;
2928
			break;
2925
		}
2929
		}
2926
 
2930
 
2927
	} else {
2931
	} else {
2928
		*DP &= ~DP_LINK_TRAIN_MASK;
2932
		*DP &= ~DP_LINK_TRAIN_MASK;
2929
 
2933
 
2930
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2934
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2931
		case DP_TRAINING_PATTERN_DISABLE:
2935
		case DP_TRAINING_PATTERN_DISABLE:
2932
			*DP |= DP_LINK_TRAIN_OFF;
2936
			*DP |= DP_LINK_TRAIN_OFF;
2933
			break;
2937
			break;
2934
		case DP_TRAINING_PATTERN_1:
2938
		case DP_TRAINING_PATTERN_1:
2935
			*DP |= DP_LINK_TRAIN_PAT_1;
2939
			*DP |= DP_LINK_TRAIN_PAT_1;
2936
			break;
2940
			break;
2937
		case DP_TRAINING_PATTERN_2:
2941
		case DP_TRAINING_PATTERN_2:
2938
			*DP |= DP_LINK_TRAIN_PAT_2;
2942
			*DP |= DP_LINK_TRAIN_PAT_2;
2939
			break;
2943
			break;
2940
		case DP_TRAINING_PATTERN_3:
2944
		case DP_TRAINING_PATTERN_3:
2941
			DRM_ERROR("DP training pattern 3 not supported\n");
2945
			DRM_ERROR("DP training pattern 3 not supported\n");
2942
			*DP |= DP_LINK_TRAIN_PAT_2;
2946
			*DP |= DP_LINK_TRAIN_PAT_2;
2943
			break;
2947
			break;
2944
		}
2948
		}
2945
	}
2949
	}
2946
 
2950
 
2947
	I915_WRITE(intel_dp->output_reg, *DP);
2951
	I915_WRITE(intel_dp->output_reg, *DP);
2948
	POSTING_READ(intel_dp->output_reg);
2952
	POSTING_READ(intel_dp->output_reg);
2949
 
2953
 
2950
	buf[0] = dp_train_pat;
2954
	buf[0] = dp_train_pat;
2951
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
2955
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
2952
	    DP_TRAINING_PATTERN_DISABLE) {
2956
	    DP_TRAINING_PATTERN_DISABLE) {
2953
		/* don't write DP_TRAINING_LANEx_SET on disable */
2957
		/* don't write DP_TRAINING_LANEx_SET on disable */
2954
		len = 1;
2958
		len = 1;
2955
	} else {
2959
	} else {
2956
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
2960
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
2957
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
2961
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
2958
		len = intel_dp->lane_count + 1;
2962
		len = intel_dp->lane_count + 1;
2959
	}
2963
	}
2960
 
2964
 
2961
	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
2965
	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
2962
					buf, len);
2966
					buf, len);
2963
 
2967
 
2964
	return ret == len;
2968
	return ret == len;
2965
}
2969
}
2966
 
2970
 
2967
static bool
2971
static bool
2968
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2972
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2969
			uint8_t dp_train_pat)
2973
			uint8_t dp_train_pat)
2970
{
2974
{
2971
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
2975
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
2972
	intel_dp_set_signal_levels(intel_dp, DP);
2976
	intel_dp_set_signal_levels(intel_dp, DP);
2973
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
2977
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
2974
}
2978
}
2975
 
2979
 
2976
static bool
2980
static bool
2977
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2981
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2978
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
2982
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
2979
{
2983
{
2980
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2984
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2981
	struct drm_device *dev = intel_dig_port->base.base.dev;
2985
	struct drm_device *dev = intel_dig_port->base.base.dev;
2982
	struct drm_i915_private *dev_priv = dev->dev_private;
2986
	struct drm_i915_private *dev_priv = dev->dev_private;
2983
	int ret;
2987
	int ret;
2984
 
2988
 
2985
	intel_get_adjust_train(intel_dp, link_status);
2989
	intel_get_adjust_train(intel_dp, link_status);
2986
	intel_dp_set_signal_levels(intel_dp, DP);
2990
	intel_dp_set_signal_levels(intel_dp, DP);
2987
 
2991
 
2988
	I915_WRITE(intel_dp->output_reg, *DP);
2992
	I915_WRITE(intel_dp->output_reg, *DP);
2989
	POSTING_READ(intel_dp->output_reg);
2993
	POSTING_READ(intel_dp->output_reg);
2990
 
2994
 
2991
	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
2995
	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
2992
				intel_dp->train_set, intel_dp->lane_count);
2996
				intel_dp->train_set, intel_dp->lane_count);
2993
 
2997
 
2994
	return ret == intel_dp->lane_count;
2998
	return ret == intel_dp->lane_count;
2995
}
2999
}
2996
 
3000
 
2997
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3001
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
2998
{
3002
{
2999
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3003
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3000
	struct drm_device *dev = intel_dig_port->base.base.dev;
3004
	struct drm_device *dev = intel_dig_port->base.base.dev;
3001
	struct drm_i915_private *dev_priv = dev->dev_private;
3005
	struct drm_i915_private *dev_priv = dev->dev_private;
3002
	enum port port = intel_dig_port->port;
3006
	enum port port = intel_dig_port->port;
3003
	uint32_t val;
3007
	uint32_t val;
3004
 
3008
 
3005
	if (!HAS_DDI(dev))
3009
	if (!HAS_DDI(dev))
3006
		return;
3010
		return;
3007
 
3011
 
3008
	val = I915_READ(DP_TP_CTL(port));
3012
	val = I915_READ(DP_TP_CTL(port));
3009
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3013
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3010
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3014
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3011
	I915_WRITE(DP_TP_CTL(port), val);
3015
	I915_WRITE(DP_TP_CTL(port), val);
3012
 
3016
 
3013
	/*
3017
	/*
3014
	 * On PORT_A we can have only eDP in SST mode. There the only reason
3018
	 * On PORT_A we can have only eDP in SST mode. There the only reason
3015
	 * we need to set idle transmission mode is to work around a HW issue
3019
	 * we need to set idle transmission mode is to work around a HW issue
3016
	 * where we enable the pipe while not in idle link-training mode.
3020
	 * where we enable the pipe while not in idle link-training mode.
3017
	 * In this case there is requirement to wait for a minimum number of
3021
	 * In this case there is requirement to wait for a minimum number of
3018
	 * idle patterns to be sent.
3022
	 * idle patterns to be sent.
3019
	 */
3023
	 */
3020
	if (port == PORT_A)
3024
	if (port == PORT_A)
3021
		return;
3025
		return;
3022
 
3026
 
3023
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3027
	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3024
		     1))
3028
		     1))
3025
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3029
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
3026
}
3030
}
3027
 
3031
 
3028
/* Enable corresponding port and start training pattern 1 */
3032
/* Enable corresponding port and start training pattern 1 */
3029
void
3033
void
3030
intel_dp_start_link_train(struct intel_dp *intel_dp)
3034
intel_dp_start_link_train(struct intel_dp *intel_dp)
3031
{
3035
{
3032
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3036
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3033
	struct drm_device *dev = encoder->dev;
3037
	struct drm_device *dev = encoder->dev;
3034
	int i;
3038
	int i;
3035
	uint8_t voltage;
3039
	uint8_t voltage;
3036
	int voltage_tries, loop_tries;
3040
	int voltage_tries, loop_tries;
3037
	uint32_t DP = intel_dp->DP;
3041
	uint32_t DP = intel_dp->DP;
3038
	uint8_t link_config[2];
3042
	uint8_t link_config[2];
3039
 
3043
 
3040
	if (HAS_DDI(dev))
3044
	if (HAS_DDI(dev))
3041
		intel_ddi_prepare_link_retrain(encoder);
3045
		intel_ddi_prepare_link_retrain(encoder);
3042
 
3046
 
3043
	/* Write the link configuration data */
3047
	/* Write the link configuration data */
3044
	link_config[0] = intel_dp->link_bw;
3048
	link_config[0] = intel_dp->link_bw;
3045
	link_config[1] = intel_dp->lane_count;
3049
	link_config[1] = intel_dp->lane_count;
3046
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3050
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3047
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3051
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3048
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3052
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3049
 
3053
 
3050
	link_config[0] = 0;
3054
	link_config[0] = 0;
3051
	link_config[1] = DP_SET_ANSI_8B10B;
3055
	link_config[1] = DP_SET_ANSI_8B10B;
3052
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3056
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3053
 
3057
 
3054
	DP |= DP_PORT_EN;
3058
	DP |= DP_PORT_EN;
3055
 
3059
 
3056
	/* clock recovery */
3060
	/* clock recovery */
3057
	if (!intel_dp_reset_link_train(intel_dp, &DP,
3061
	if (!intel_dp_reset_link_train(intel_dp, &DP,
3058
				       DP_TRAINING_PATTERN_1 |
3062
				       DP_TRAINING_PATTERN_1 |
3059
				       DP_LINK_SCRAMBLING_DISABLE)) {
3063
				       DP_LINK_SCRAMBLING_DISABLE)) {
3060
		DRM_ERROR("failed to enable link training\n");
3064
		DRM_ERROR("failed to enable link training\n");
3061
		return;
3065
		return;
3062
	}
3066
	}
3063
 
3067
 
3064
	voltage = 0xff;
3068
	voltage = 0xff;
3065
	voltage_tries = 0;
3069
	voltage_tries = 0;
3066
	loop_tries = 0;
3070
	loop_tries = 0;
3067
	for (;;) {
3071
	for (;;) {
3068
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
3072
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
3069
 
3073
 
3070
		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3074
		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3071
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3075
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3072
			DRM_ERROR("failed to get link status\n");
3076
			DRM_ERROR("failed to get link status\n");
3073
			break;
3077
			break;
3074
		}
3078
		}
3075
 
3079
 
3076
		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3080
		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3077
			DRM_DEBUG_KMS("clock recovery OK\n");
3081
			DRM_DEBUG_KMS("clock recovery OK\n");
3078
			break;
3082
			break;
3079
		}
3083
		}
3080
 
3084
 
3081
		/* Check to see if we've tried the max voltage */
3085
		/* Check to see if we've tried the max voltage */
3082
		for (i = 0; i < intel_dp->lane_count; i++)
3086
		for (i = 0; i < intel_dp->lane_count; i++)
3083
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3087
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3084
				break;
3088
				break;
3085
		if (i == intel_dp->lane_count) {
3089
		if (i == intel_dp->lane_count) {
3086
			++loop_tries;
3090
			++loop_tries;
3087
			if (loop_tries == 5) {
3091
			if (loop_tries == 5) {
3088
				DRM_ERROR("too many full retries, give up\n");
3092
				DRM_ERROR("too many full retries, give up\n");
3089
			break;
3093
			break;
3090
			}
3094
			}
3091
			intel_dp_reset_link_train(intel_dp, &DP,
3095
			intel_dp_reset_link_train(intel_dp, &DP,
3092
						  DP_TRAINING_PATTERN_1 |
3096
						  DP_TRAINING_PATTERN_1 |
3093
						  DP_LINK_SCRAMBLING_DISABLE);
3097
						  DP_LINK_SCRAMBLING_DISABLE);
3094
			voltage_tries = 0;
3098
			voltage_tries = 0;
3095
			continue;
3099
			continue;
3096
		}
3100
		}
3097
 
3101
 
3098
		/* Check to see if we've tried the same voltage 5 times */
3102
		/* Check to see if we've tried the same voltage 5 times */
3099
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3103
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3100
			++voltage_tries;
3104
			++voltage_tries;
3101
			if (voltage_tries == 5) {
3105
			if (voltage_tries == 5) {
3102
				DRM_ERROR("too many voltage retries, give up\n");
3106
				DRM_ERROR("too many voltage retries, give up\n");
3103
				break;
3107
				break;
3104
			}
3108
			}
3105
		} else
3109
		} else
3106
			voltage_tries = 0;
3110
			voltage_tries = 0;
3107
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3111
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3108
 
3112
 
3109
		/* Update training set as requested by target */
3113
		/* Update training set as requested by target */
3110
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3114
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3111
			DRM_ERROR("failed to update link training\n");
3115
			DRM_ERROR("failed to update link training\n");
3112
			break;
3116
			break;
3113
		}
3117
		}
3114
	}
3118
	}
3115
 
3119
 
3116
	intel_dp->DP = DP;
3120
	intel_dp->DP = DP;
3117
}
3121
}
3118
 
3122
 
3119
void
3123
void
3120
intel_dp_complete_link_train(struct intel_dp *intel_dp)
3124
intel_dp_complete_link_train(struct intel_dp *intel_dp)
3121
{
3125
{
3122
	bool channel_eq = false;
3126
	bool channel_eq = false;
3123
	int tries, cr_tries;
3127
	int tries, cr_tries;
3124
	uint32_t DP = intel_dp->DP;
3128
	uint32_t DP = intel_dp->DP;
3125
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3129
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3126
 
3130
 
3127
	/* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3131
	/* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3128
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3132
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3129
		training_pattern = DP_TRAINING_PATTERN_3;
3133
		training_pattern = DP_TRAINING_PATTERN_3;
3130
 
3134
 
3131
	/* channel equalization */
3135
	/* channel equalization */
3132
	if (!intel_dp_set_link_train(intel_dp, &DP,
3136
	if (!intel_dp_set_link_train(intel_dp, &DP,
3133
				     training_pattern |
3137
				     training_pattern |
3134
				     DP_LINK_SCRAMBLING_DISABLE)) {
3138
				     DP_LINK_SCRAMBLING_DISABLE)) {
3135
		DRM_ERROR("failed to start channel equalization\n");
3139
		DRM_ERROR("failed to start channel equalization\n");
3136
		return;
3140
		return;
3137
	}
3141
	}
3138
 
3142
 
3139
	tries = 0;
3143
	tries = 0;
3140
	cr_tries = 0;
3144
	cr_tries = 0;
3141
	channel_eq = false;
3145
	channel_eq = false;
3142
	for (;;) {
3146
	for (;;) {
3143
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
3147
		uint8_t	    link_status[DP_LINK_STATUS_SIZE];
3144
 
3148
 
3145
		if (cr_tries > 5) {
3149
		if (cr_tries > 5) {
3146
			DRM_ERROR("failed to train DP, aborting\n");
3150
			DRM_ERROR("failed to train DP, aborting\n");
3147
			break;
3151
			break;
3148
		}
3152
		}
3149
 
3153
 
3150
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3154
		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3151
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3155
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
3152
			DRM_ERROR("failed to get link status\n");
3156
			DRM_ERROR("failed to get link status\n");
3153
			break;
3157
			break;
3154
		}
3158
		}
3155
 
3159
 
3156
		/* Make sure clock is still ok */
3160
		/* Make sure clock is still ok */
3157
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3161
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3158
			intel_dp_start_link_train(intel_dp);
3162
			intel_dp_start_link_train(intel_dp);
3159
			intel_dp_set_link_train(intel_dp, &DP,
3163
			intel_dp_set_link_train(intel_dp, &DP,
3160
						training_pattern |
3164
						training_pattern |
3161
						DP_LINK_SCRAMBLING_DISABLE);
3165
						DP_LINK_SCRAMBLING_DISABLE);
3162
			cr_tries++;
3166
			cr_tries++;
3163
			continue;
3167
			continue;
3164
		}
3168
		}
3165
 
3169
 
3166
		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3170
		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3167
			channel_eq = true;
3171
			channel_eq = true;
3168
			break;
3172
			break;
3169
		}
3173
		}
3170
 
3174
 
3171
		/* Try 5 times, then try clock recovery if that fails */
3175
		/* Try 5 times, then try clock recovery if that fails */
3172
		if (tries > 5) {
3176
		if (tries > 5) {
3173
			intel_dp_link_down(intel_dp);
3177
			intel_dp_link_down(intel_dp);
3174
			intel_dp_start_link_train(intel_dp);
3178
			intel_dp_start_link_train(intel_dp);
3175
			intel_dp_set_link_train(intel_dp, &DP,
3179
			intel_dp_set_link_train(intel_dp, &DP,
3176
						training_pattern |
3180
						training_pattern |
3177
						DP_LINK_SCRAMBLING_DISABLE);
3181
						DP_LINK_SCRAMBLING_DISABLE);
3178
			tries = 0;
3182
			tries = 0;
3179
			cr_tries++;
3183
			cr_tries++;
3180
			continue;
3184
			continue;
3181
		}
3185
		}
3182
 
3186
 
3183
		/* Update training set as requested by target */
3187
		/* Update training set as requested by target */
3184
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3188
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3185
			DRM_ERROR("failed to update link training\n");
3189
			DRM_ERROR("failed to update link training\n");
3186
			break;
3190
			break;
3187
		}
3191
		}
3188
		++tries;
3192
		++tries;
3189
	}
3193
	}
3190
 
3194
 
3191
	intel_dp_set_idle_link_train(intel_dp);
3195
	intel_dp_set_idle_link_train(intel_dp);
3192
 
3196
 
3193
	intel_dp->DP = DP;
3197
	intel_dp->DP = DP;
3194
 
3198
 
3195
	if (channel_eq)
3199
	if (channel_eq)
3196
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3200
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3197
 
3201
 
3198
}
3202
}
3199
 
3203
 
3200
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3204
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3201
{
3205
{
3202
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3206
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3203
				DP_TRAINING_PATTERN_DISABLE);
3207
				DP_TRAINING_PATTERN_DISABLE);
3204
}
3208
}
3205
 
3209
 
3206
static void
3210
static void
3207
intel_dp_link_down(struct intel_dp *intel_dp)
3211
intel_dp_link_down(struct intel_dp *intel_dp)
3208
{
3212
{
3209
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3213
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3210
	enum port port = intel_dig_port->port;
3214
	enum port port = intel_dig_port->port;
3211
	struct drm_device *dev = intel_dig_port->base.base.dev;
3215
	struct drm_device *dev = intel_dig_port->base.base.dev;
3212
	struct drm_i915_private *dev_priv = dev->dev_private;
3216
	struct drm_i915_private *dev_priv = dev->dev_private;
3213
	struct intel_crtc *intel_crtc =
3217
	struct intel_crtc *intel_crtc =
3214
		to_intel_crtc(intel_dig_port->base.base.crtc);
3218
		to_intel_crtc(intel_dig_port->base.base.crtc);
3215
	uint32_t DP = intel_dp->DP;
3219
	uint32_t DP = intel_dp->DP;
3216
 
3220
 
3217
	if (WARN_ON(HAS_DDI(dev)))
3221
	if (WARN_ON(HAS_DDI(dev)))
3218
		return;
3222
		return;
3219
 
3223
 
3220
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3224
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3221
		return;
3225
		return;
3222
 
3226
 
3223
	DRM_DEBUG_KMS("\n");
3227
	DRM_DEBUG_KMS("\n");
3224
 
3228
 
3225
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3229
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
3226
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3230
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
3227
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3231
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3228
	} else {
3232
	} else {
3229
		DP &= ~DP_LINK_TRAIN_MASK;
3233
		DP &= ~DP_LINK_TRAIN_MASK;
3230
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3234
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3231
	}
3235
	}
3232
	POSTING_READ(intel_dp->output_reg);
3236
	POSTING_READ(intel_dp->output_reg);
3233
 
3237
 
3234
	if (HAS_PCH_IBX(dev) &&
3238
	if (HAS_PCH_IBX(dev) &&
3235
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3239
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
3236
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
3240
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
3237
 
3241
 
3238
		/* Hardware workaround: leaving our transcoder select
3242
		/* Hardware workaround: leaving our transcoder select
3239
		 * set to transcoder B while it's off will prevent the
3243
		 * set to transcoder B while it's off will prevent the
3240
		 * corresponding HDMI output on transcoder A.
3244
		 * corresponding HDMI output on transcoder A.
3241
		 *
3245
		 *
3242
		 * Combine this with another hardware workaround:
3246
		 * Combine this with another hardware workaround:
3243
		 * transcoder select bit can only be cleared while the
3247
		 * transcoder select bit can only be cleared while the
3244
		 * port is enabled.
3248
		 * port is enabled.
3245
		 */
3249
		 */
3246
		DP &= ~DP_PIPEB_SELECT;
3250
		DP &= ~DP_PIPEB_SELECT;
3247
		I915_WRITE(intel_dp->output_reg, DP);
3251
		I915_WRITE(intel_dp->output_reg, DP);
3248
 
3252
 
3249
		/* Changes to enable or select take place the vblank
3253
		/* Changes to enable or select take place the vblank
3250
		 * after being written.
3254
		 * after being written.
3251
		 */
3255
		 */
3252
		if (WARN_ON(crtc == NULL)) {
3256
		if (WARN_ON(crtc == NULL)) {
3253
			/* We should never try to disable a port without a crtc
3257
			/* We should never try to disable a port without a crtc
3254
			 * attached. For paranoia keep the code around for a
3258
			 * attached. For paranoia keep the code around for a
3255
			 * bit. */
3259
			 * bit. */
3256
			POSTING_READ(intel_dp->output_reg);
3260
			POSTING_READ(intel_dp->output_reg);
3257
			msleep(50);
3261
			msleep(50);
3258
		} else
3262
		} else
3259
			intel_wait_for_vblank(dev, intel_crtc->pipe);
3263
			intel_wait_for_vblank(dev, intel_crtc->pipe);
3260
	}
3264
	}
3261
 
3265
 
3262
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3266
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
3263
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3267
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3264
	POSTING_READ(intel_dp->output_reg);
3268
	POSTING_READ(intel_dp->output_reg);
3265
	msleep(intel_dp->panel_power_down_delay);
3269
	msleep(intel_dp->panel_power_down_delay);
3266
}
3270
}
3267
 
3271
 
3268
static bool
3272
static bool
3269
intel_dp_get_dpcd(struct intel_dp *intel_dp)
3273
intel_dp_get_dpcd(struct intel_dp *intel_dp)
3270
{
3274
{
3271
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3275
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3272
	struct drm_device *dev = dig_port->base.base.dev;
3276
	struct drm_device *dev = dig_port->base.base.dev;
3273
	struct drm_i915_private *dev_priv = dev->dev_private;
3277
	struct drm_i915_private *dev_priv = dev->dev_private;
3274
 
3278
 
3275
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
3279
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
3276
 
3280
 
3277
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3281
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3278
				    sizeof(intel_dp->dpcd)) < 0)
3282
				    sizeof(intel_dp->dpcd)) < 0)
3279
		return false; /* aux transfer failed */
3283
		return false; /* aux transfer failed */
3280
 
3284
 
3281
	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
3285
	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
3282
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
3286
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
3283
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
3287
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
3284
 
3288
 
3285
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3289
	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3286
		return false; /* DPCD not present */
3290
		return false; /* DPCD not present */
3287
 
3291
 
3288
	/* Check if the panel supports PSR */
3292
	/* Check if the panel supports PSR */
3289
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3293
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3290
	if (is_edp(intel_dp)) {
3294
	if (is_edp(intel_dp)) {
3291
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3295
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3292
				       intel_dp->psr_dpcd,
3296
				       intel_dp->psr_dpcd,
3293
				       sizeof(intel_dp->psr_dpcd));
3297
				       sizeof(intel_dp->psr_dpcd));
3294
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3298
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3295
			dev_priv->psr.sink_support = true;
3299
			dev_priv->psr.sink_support = true;
3296
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3300
		DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3297
		}
3301
		}
3298
	}
3302
	}
3299
 
3303
 
3300
	/* Training Pattern 3 support */
3304
	/* Training Pattern 3 support */
3301
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3305
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3302
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
3306
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
3303
		intel_dp->use_tps3 = true;
3307
		intel_dp->use_tps3 = true;
3304
		DRM_DEBUG_KMS("Displayport TPS3 supported");
3308
		DRM_DEBUG_KMS("Displayport TPS3 supported");
3305
	} else
3309
	} else
3306
		intel_dp->use_tps3 = false;
3310
		intel_dp->use_tps3 = false;
3307
 
3311
 
3308
	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3312
	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3309
	      DP_DWN_STRM_PORT_PRESENT))
3313
	      DP_DWN_STRM_PORT_PRESENT))
3310
		return true; /* native DP sink */
3314
		return true; /* native DP sink */
3311
 
3315
 
3312
	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3316
	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3313
		return true; /* no per-port downstream info */
3317
		return true; /* no per-port downstream info */
3314
 
3318
 
3315
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3319
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3316
					   intel_dp->downstream_ports,
3320
					   intel_dp->downstream_ports,
3317
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
3321
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
3318
		return false; /* downstream port status fetch failed */
3322
		return false; /* downstream port status fetch failed */
3319
 
3323
 
3320
		return true;
3324
		return true;
3321
}
3325
}
3322
 
3326
 
3323
static void
3327
static void
3324
intel_dp_probe_oui(struct intel_dp *intel_dp)
3328
intel_dp_probe_oui(struct intel_dp *intel_dp)
3325
{
3329
{
3326
	u8 buf[3];
3330
	u8 buf[3];
3327
 
3331
 
3328
	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3332
	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3329
		return;
3333
		return;
3330
 
3334
 
3331
	intel_edp_panel_vdd_on(intel_dp);
3335
	intel_edp_panel_vdd_on(intel_dp);
3332
 
3336
 
3333
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3337
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3334
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3338
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3335
			      buf[0], buf[1], buf[2]);
3339
			      buf[0], buf[1], buf[2]);
3336
 
3340
 
3337
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3341
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3338
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3342
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3339
			      buf[0], buf[1], buf[2]);
3343
			      buf[0], buf[1], buf[2]);
3340
 
3344
 
3341
	edp_panel_vdd_off(intel_dp, false);
3345
	edp_panel_vdd_off(intel_dp, false);
3342
}
3346
}
3343
 
3347
 
3344
static bool
3348
static bool
3345
intel_dp_probe_mst(struct intel_dp *intel_dp)
3349
intel_dp_probe_mst(struct intel_dp *intel_dp)
3346
{
3350
{
3347
	u8 buf[1];
3351
	u8 buf[1];
3348
 
3352
 
3349
	if (!intel_dp->can_mst)
3353
	if (!intel_dp->can_mst)
3350
		return false;
3354
		return false;
3351
 
3355
 
3352
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3356
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3353
		return false;
3357
		return false;
3354
 
3358
 
3355
	_edp_panel_vdd_on(intel_dp);
3359
	_edp_panel_vdd_on(intel_dp);
3356
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3360
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3357
		if (buf[0] & DP_MST_CAP) {
3361
		if (buf[0] & DP_MST_CAP) {
3358
			DRM_DEBUG_KMS("Sink is MST capable\n");
3362
			DRM_DEBUG_KMS("Sink is MST capable\n");
3359
			intel_dp->is_mst = true;
3363
			intel_dp->is_mst = true;
3360
		} else {
3364
		} else {
3361
			DRM_DEBUG_KMS("Sink is not MST capable\n");
3365
			DRM_DEBUG_KMS("Sink is not MST capable\n");
3362
			intel_dp->is_mst = false;
3366
			intel_dp->is_mst = false;
3363
		}
3367
		}
3364
	}
3368
	}
3365
	edp_panel_vdd_off(intel_dp, false);
3369
	edp_panel_vdd_off(intel_dp, false);
3366
 
3370
 
3367
	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3371
	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3368
	return intel_dp->is_mst;
3372
	return intel_dp->is_mst;
3369
}
3373
}
3370
 
3374
 
3371
/*
 * Ask the sink to compute a CRC of the received frame and copy the six
 * CRC bytes into @crc (caller supplies a >= 6 byte buffer).
 *
 * Returns 0 on success, -ENOTTY when the sink cannot compute CRCs,
 * or -EAGAIN on AUX transfer failure.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	u8 misc[1];

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, misc) < 0)
		return -EAGAIN;

	if (!(misc[0] & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       DP_TEST_SINK_START) < 0)
		return -EAGAIN;

	/* Wait 2 vblanks to be sure we will have the correct CRC value */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
		return -EAGAIN;

	/* Stop the sink-side CRC calculation again. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
	return 0;
}
3399
 
3403
 
3400
static bool
3404
static bool
3401
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3405
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3402
{
3406
{
3403
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3407
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
3404
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
3408
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
3405
				       sink_irq_vector, 1) == 1;
3409
				       sink_irq_vector, 1) == 1;
3406
}
3410
}
3407
 
3411
 
3408
static bool
3412
static bool
3409
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3413
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3410
{
3414
{
3411
	int ret;
3415
	int ret;
3412
 
3416
 
3413
	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3417
	ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3414
					     DP_SINK_COUNT_ESI,
3418
					     DP_SINK_COUNT_ESI,
3415
					     sink_irq_vector, 14);
3419
					     sink_irq_vector, 14);
3416
	if (ret != 14)
3420
	if (ret != 14)
3417
		return false;
3421
		return false;
3418
 
3422
 
3419
	return true;
3423
	return true;
3420
}
3424
}
3421
 
3425
 
3422
static void
3426
static void
3423
intel_dp_handle_test_request(struct intel_dp *intel_dp)
3427
intel_dp_handle_test_request(struct intel_dp *intel_dp)
3424
{
3428
{
3425
	/* NAK by default */
3429
	/* NAK by default */
3426
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
3430
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
3427
}
3431
}
3428
 
3432
 
3429
/*
 * Service an MST sink interrupt: read the ESI (Event Status Indicator)
 * block, retrain the link if channel equalization dropped, and hand the
 * event to the MST topology manager. Loops as long as new events keep
 * arriving after each acknowledgement.
 *
 * Returns the topology manager's result, 0 when the event was not
 * handled, or -EINVAL when not in MST mode or when the sink stopped
 * responding (in which case MST mode is torn down and a hotplug event
 * is sent).
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events back to the sink;
				 * retry a few times since AUX writes can
				 * fail transiently. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived while we were
				 * servicing this one - go round again. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
3485
 
3489
 
3486
/*
3490
/*
3487
 * According to DP spec
3491
 * According to DP spec
3488
 * 5.1.2:
3492
 * 5.1.2:
3489
 *  1. Read DPCD
3493
 *  1. Read DPCD
3490
 *  2. Configure link according to Receiver Capabilities
3494
 *  2. Configure link according to Receiver Capabilities
3491
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
3495
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
3492
 *  4. Check link status on receipt of hot-plug interrupt
3496
 *  4. Check link status on receipt of hot-plug interrupt
3493
 */
3497
 */
3494
void
3498
void
3495
intel_dp_check_link_status(struct intel_dp *intel_dp)
3499
intel_dp_check_link_status(struct intel_dp *intel_dp)
3496
{
3500
{
3497
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3501
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3498
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3502
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3499
	u8 sink_irq_vector;
3503
	u8 sink_irq_vector;
3500
	u8 link_status[DP_LINK_STATUS_SIZE];
3504
	u8 link_status[DP_LINK_STATUS_SIZE];
3501
 
3505
 
3502
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3506
	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3503
 
3507
 
3504
	if (!intel_encoder->connectors_active)
3508
	if (!intel_encoder->connectors_active)
3505
		return;
3509
		return;
3506
 
3510
 
3507
	if (WARN_ON(!intel_encoder->base.crtc))
3511
	if (WARN_ON(!intel_encoder->base.crtc))
3508
		return;
3512
		return;
3509
 
3513
 
3510
	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3514
	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3511
		return;
3515
		return;
3512
 
3516
 
3513
	/* Try to read receiver status if the link appears to be up */
3517
	/* Try to read receiver status if the link appears to be up */
3514
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
3518
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
3515
		return;
3519
		return;
3516
	}
3520
	}
3517
 
3521
 
3518
	/* Now read the DPCD to see if it's actually running */
3522
	/* Now read the DPCD to see if it's actually running */
3519
	if (!intel_dp_get_dpcd(intel_dp)) {
3523
	if (!intel_dp_get_dpcd(intel_dp)) {
3520
		return;
3524
		return;
3521
	}
3525
	}
3522
 
3526
 
3523
	/* Try to read the source of the interrupt */
3527
	/* Try to read the source of the interrupt */
3524
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3528
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3525
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3529
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3526
		/* Clear interrupt source */
3530
		/* Clear interrupt source */
3527
		drm_dp_dpcd_writeb(&intel_dp->aux,
3531
		drm_dp_dpcd_writeb(&intel_dp->aux,
3528
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
3532
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
3529
					    sink_irq_vector);
3533
					    sink_irq_vector);
3530
 
3534
 
3531
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3535
		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
3532
			intel_dp_handle_test_request(intel_dp);
3536
			intel_dp_handle_test_request(intel_dp);
3533
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3537
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
3534
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
3538
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
3535
	}
3539
	}
3536
 
3540
 
3537
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3541
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3538
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
3542
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
3539
			      intel_encoder->base.name);
3543
			      intel_encoder->base.name);
3540
		intel_dp_start_link_train(intel_dp);
3544
		intel_dp_start_link_train(intel_dp);
3541
		intel_dp_complete_link_train(intel_dp);
3545
		intel_dp_complete_link_train(intel_dp);
3542
		intel_dp_stop_link_train(intel_dp);
3546
		intel_dp_stop_link_train(intel_dp);
3543
	}
3547
	}
3544
}
3548
}
3545
 
3549
 
3546
/* XXX this is probably wrong for multiple downstream ports */
3550
/* XXX this is probably wrong for multiple downstream ports */
3547
static enum drm_connector_status
3551
static enum drm_connector_status
3548
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
3552
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
3549
{
3553
{
3550
	uint8_t *dpcd = intel_dp->dpcd;
3554
	uint8_t *dpcd = intel_dp->dpcd;
3551
	uint8_t type;
3555
	uint8_t type;
3552
 
3556
 
3553
	if (!intel_dp_get_dpcd(intel_dp))
3557
	if (!intel_dp_get_dpcd(intel_dp))
3554
		return connector_status_disconnected;
3558
		return connector_status_disconnected;
3555
 
3559
 
3556
	/* if there's no downstream port, we're done */
3560
	/* if there's no downstream port, we're done */
3557
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
3561
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
3558
		return connector_status_connected;
3562
		return connector_status_connected;
3559
 
3563
 
3560
	/* If we're HPD-aware, SINK_COUNT changes dynamically */
3564
	/* If we're HPD-aware, SINK_COUNT changes dynamically */
3561
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3565
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3562
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
3566
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
3563
		uint8_t reg;
3567
		uint8_t reg;
3564
 
3568
 
3565
		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
3569
		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
3566
					    ®, 1) < 0)
3570
					    ®, 1) < 0)
3567
			return connector_status_unknown;
3571
			return connector_status_unknown;
3568
 
3572
 
3569
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
3573
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
3570
					      : connector_status_disconnected;
3574
					      : connector_status_disconnected;
3571
	}
3575
	}
3572
 
3576
 
3573
	/* If no HPD, poke DDC gently */
3577
	/* If no HPD, poke DDC gently */
3574
	if (drm_probe_ddc(&intel_dp->aux.ddc))
3578
	if (drm_probe_ddc(&intel_dp->aux.ddc))
3575
		return connector_status_connected;
3579
		return connector_status_connected;
3576
 
3580
 
3577
	/* Well we tried, say unknown for unreliable port types */
3581
	/* Well we tried, say unknown for unreliable port types */
3578
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
3582
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
3579
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
3583
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
3580
		if (type == DP_DS_PORT_TYPE_VGA ||
3584
		if (type == DP_DS_PORT_TYPE_VGA ||
3581
		    type == DP_DS_PORT_TYPE_NON_EDID)
3585
		    type == DP_DS_PORT_TYPE_NON_EDID)
3582
		return connector_status_unknown;
3586
		return connector_status_unknown;
3583
	} else {
3587
	} else {
3584
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3588
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3585
			DP_DWN_STRM_PORT_TYPE_MASK;
3589
			DP_DWN_STRM_PORT_TYPE_MASK;
3586
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
3590
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
3587
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
3591
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
3588
			return connector_status_unknown;
3592
			return connector_status_unknown;
3589
	}
3593
	}
3590
 
3594
 
3591
	/* Anything else is out of spec, warn and ignore */
3595
	/* Anything else is out of spec, warn and ignore */
3592
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
3596
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
3593
	return connector_status_disconnected;
3597
	return connector_status_disconnected;
3594
}
3598
}
3595
 
3599
 
3596
static enum drm_connector_status
3600
static enum drm_connector_status
3597
ironlake_dp_detect(struct intel_dp *intel_dp)
3601
ironlake_dp_detect(struct intel_dp *intel_dp)
3598
{
3602
{
3599
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3603
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3600
	struct drm_i915_private *dev_priv = dev->dev_private;
3604
	struct drm_i915_private *dev_priv = dev->dev_private;
3601
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3605
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3602
	enum drm_connector_status status;
3606
	enum drm_connector_status status;
3603
 
3607
 
3604
	/* Can't disconnect eDP, but you can close the lid... */
3608
	/* Can't disconnect eDP, but you can close the lid... */
3605
	if (is_edp(intel_dp)) {
3609
	if (is_edp(intel_dp)) {
3606
		status = intel_panel_detect(dev);
3610
		status = intel_panel_detect(dev);
3607
		if (status == connector_status_unknown)
3611
		if (status == connector_status_unknown)
3608
			status = connector_status_connected;
3612
			status = connector_status_connected;
3609
		return status;
3613
		return status;
3610
	}
3614
	}
3611
 
3615
 
3612
	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
3616
	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
3613
		return connector_status_disconnected;
3617
		return connector_status_disconnected;
3614
 
3618
 
3615
	return intel_dp_detect_dpcd(intel_dp);
3619
	return intel_dp_detect_dpcd(intel_dp);
3616
}
3620
}
3617
 
3621
 
3618
static int g4x_digital_port_connected(struct drm_device *dev,
3622
static int g4x_digital_port_connected(struct drm_device *dev,
3619
				       struct intel_digital_port *intel_dig_port)
3623
				       struct intel_digital_port *intel_dig_port)
3620
{
3624
{
3621
	struct drm_i915_private *dev_priv = dev->dev_private;
3625
	struct drm_i915_private *dev_priv = dev->dev_private;
3622
	uint32_t bit;
3626
	uint32_t bit;
3623
 
3627
 
3624
	if (IS_VALLEYVIEW(dev)) {
3628
	if (IS_VALLEYVIEW(dev)) {
3625
		switch (intel_dig_port->port) {
3629
		switch (intel_dig_port->port) {
3626
		case PORT_B:
3630
		case PORT_B:
3627
			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
3631
			bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
3628
			break;
3632
			break;
3629
		case PORT_C:
3633
		case PORT_C:
3630
			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
3634
			bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
3631
			break;
3635
			break;
3632
		case PORT_D:
3636
		case PORT_D:
3633
			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
3637
			bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
3634
			break;
3638
			break;
3635
		default:
3639
		default:
3636
			return -EINVAL;
3640
			return -EINVAL;
3637
		}
3641
		}
3638
	} else {
3642
	} else {
3639
	switch (intel_dig_port->port) {
3643
	switch (intel_dig_port->port) {
3640
	case PORT_B:
3644
	case PORT_B:
3641
			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
3645
			bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
3642
		break;
3646
		break;
3643
	case PORT_C:
3647
	case PORT_C:
3644
			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
3648
			bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
3645
		break;
3649
		break;
3646
	case PORT_D:
3650
	case PORT_D:
3647
			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
3651
			bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
3648
		break;
3652
		break;
3649
	default:
3653
	default:
3650
			return -EINVAL;
3654
			return -EINVAL;
3651
	}
3655
	}
3652
	}
3656
	}
3653
 
3657
 
3654
	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
3658
	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
3655
		return 0;
3659
		return 0;
3656
	return 1;
3660
	return 1;
3657
}
3661
}
3658
 
3662
 
3659
static enum drm_connector_status
3663
static enum drm_connector_status
3660
g4x_dp_detect(struct intel_dp *intel_dp)
3664
g4x_dp_detect(struct intel_dp *intel_dp)
3661
{
3665
{
3662
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3666
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3663
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3667
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3664
	int ret;
3668
	int ret;
3665
 
3669
 
3666
	/* Can't disconnect eDP, but you can close the lid... */
3670
	/* Can't disconnect eDP, but you can close the lid... */
3667
	if (is_edp(intel_dp)) {
3671
	if (is_edp(intel_dp)) {
3668
		enum drm_connector_status status;
3672
		enum drm_connector_status status;
3669
 
3673
 
3670
		status = intel_panel_detect(dev);
3674
		status = intel_panel_detect(dev);
3671
		if (status == connector_status_unknown)
3675
		if (status == connector_status_unknown)
3672
			status = connector_status_connected;
3676
			status = connector_status_connected;
3673
		return status;
3677
		return status;
3674
	}
3678
	}
3675
 
3679
 
3676
	ret = g4x_digital_port_connected(dev, intel_dig_port);
3680
	ret = g4x_digital_port_connected(dev, intel_dig_port);
3677
	if (ret == -EINVAL)
3681
	if (ret == -EINVAL)
3678
		return connector_status_unknown;
3682
		return connector_status_unknown;
3679
	else if (ret == 0)
3683
	else if (ret == 0)
3680
		return connector_status_disconnected;
3684
		return connector_status_disconnected;
3681
 
3685
 
3682
	return intel_dp_detect_dpcd(intel_dp);
3686
	return intel_dp_detect_dpcd(intel_dp);
3683
}
3687
}
3684
 
3688
 
3685
static struct edid *
3689
static struct edid *
3686
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
3690
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
3687
{
3691
{
3688
	struct intel_connector *intel_connector = to_intel_connector(connector);
3692
	struct intel_connector *intel_connector = to_intel_connector(connector);
3689
 
3693
 
3690
	/* use cached edid if we have one */
3694
	/* use cached edid if we have one */
3691
	if (intel_connector->edid) {
3695
	if (intel_connector->edid) {
3692
		/* invalid edid */
3696
		/* invalid edid */
3693
		if (IS_ERR(intel_connector->edid))
3697
		if (IS_ERR(intel_connector->edid))
3694
			return NULL;
3698
			return NULL;
3695
 
3699
 
3696
		return drm_edid_duplicate(intel_connector->edid);
3700
		return drm_edid_duplicate(intel_connector->edid);
3697
	}
3701
	}
3698
 
3702
 
3699
	return drm_get_edid(connector, adapter);
3703
	return drm_get_edid(connector, adapter);
3700
}
3704
}
3701
 
3705
 
3702
static int
3706
static int
3703
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
3707
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
3704
{
3708
{
3705
	struct intel_connector *intel_connector = to_intel_connector(connector);
3709
	struct intel_connector *intel_connector = to_intel_connector(connector);
3706
 
3710
 
3707
	/* use cached edid if we have one */
3711
	/* use cached edid if we have one */
3708
	if (intel_connector->edid) {
3712
	if (intel_connector->edid) {
3709
		/* invalid edid */
3713
		/* invalid edid */
3710
		if (IS_ERR(intel_connector->edid))
3714
		if (IS_ERR(intel_connector->edid))
3711
			return 0;
3715
			return 0;
3712
 
3716
 
3713
		return intel_connector_update_modes(connector,
3717
		return intel_connector_update_modes(connector,
3714
						    intel_connector->edid);
3718
						    intel_connector->edid);
3715
	}
3719
	}
3716
 
3720
 
3717
	return intel_ddc_get_modes(connector, adapter);
3721
	return intel_ddc_get_modes(connector, adapter);
3718
}
3722
}
3719
 
3723
 
3720
/*
 * Connector ->detect() hook: determine whether a sink is attached,
 * probe its OUI and MST capability, and work out audio support.
 * Holds a display power reference for the duration of the probe.
 * Returns the resulting connector status; in MST mode the connector
 * reports disconnected (MST streams are exposed separately).
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	struct edid *edid = NULL;
	bool ret;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	intel_dp->has_audio = false;

	/* Platform-specific live-status check plus DPCD probe. */
	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	/* Audio: honour a forced setting, otherwise consult the EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	intel_display_power_put(dev_priv, power_domain);
	return status;
}
3787
 
3791
 
3788
static int intel_dp_get_modes(struct drm_connector *connector)
3792
static int intel_dp_get_modes(struct drm_connector *connector)
3789
{
3793
{
3790
	struct intel_dp *intel_dp = intel_attached_dp(connector);
3794
	struct intel_dp *intel_dp = intel_attached_dp(connector);
3791
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3795
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3792
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
3796
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
3793
	struct intel_connector *intel_connector = to_intel_connector(connector);
3797
	struct intel_connector *intel_connector = to_intel_connector(connector);
3794
	struct drm_device *dev = connector->dev;
3798
	struct drm_device *dev = connector->dev;
3795
	struct drm_i915_private *dev_priv = dev->dev_private;
3799
	struct drm_i915_private *dev_priv = dev->dev_private;
3796
	enum intel_display_power_domain power_domain;
3800
	enum intel_display_power_domain power_domain;
3797
	int ret;
3801
	int ret;
3798
 
3802
 
3799
	/* We should parse the EDID data and find out if it has an audio sink
3803
	/* We should parse the EDID data and find out if it has an audio sink
3800
	 */
3804
	 */
3801
 
3805
 
3802
	power_domain = intel_display_port_power_domain(intel_encoder);
3806
	power_domain = intel_display_port_power_domain(intel_encoder);
3803
	intel_display_power_get(dev_priv, power_domain);
3807
	intel_display_power_get(dev_priv, power_domain);
3804
 
3808
 
3805
	ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
3809
	ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
3806
	intel_display_power_put(dev_priv, power_domain);
3810
	intel_display_power_put(dev_priv, power_domain);
3807
	if (ret)
3811
	if (ret)
3808
		return ret;
3812
		return ret;
3809
 
3813
 
3810
	/* if eDP has no EDID, fall back to fixed mode */
3814
	/* if eDP has no EDID, fall back to fixed mode */
3811
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
3815
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
3812
			struct drm_display_mode *mode;
3816
			struct drm_display_mode *mode;
3813
		mode = drm_mode_duplicate(dev,
3817
		mode = drm_mode_duplicate(dev,
3814
					  intel_connector->panel.fixed_mode);
3818
					  intel_connector->panel.fixed_mode);
3815
		if (mode) {
3819
		if (mode) {
3816
			drm_mode_probed_add(connector, mode);
3820
			drm_mode_probed_add(connector, mode);
3817
			return 1;
3821
			return 1;
3818
		}
3822
		}
3819
	}
3823
	}
3820
	return 0;
3824
	return 0;
3821
}
3825
}
3822
 
3826
 
3823
static bool
3827
static bool
3824
intel_dp_detect_audio(struct drm_connector *connector)
3828
intel_dp_detect_audio(struct drm_connector *connector)
3825
{
3829
{
3826
	struct intel_dp *intel_dp = intel_attached_dp(connector);
3830
	struct intel_dp *intel_dp = intel_attached_dp(connector);
3827
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3831
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3828
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
3832
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
3829
	struct drm_device *dev = connector->dev;
3833
	struct drm_device *dev = connector->dev;
3830
	struct drm_i915_private *dev_priv = dev->dev_private;
3834
	struct drm_i915_private *dev_priv = dev->dev_private;
3831
	enum intel_display_power_domain power_domain;
3835
	enum intel_display_power_domain power_domain;
3832
	struct edid *edid;
3836
	struct edid *edid;
3833
	bool has_audio = false;
3837
	bool has_audio = false;
3834
 
3838
 
3835
	power_domain = intel_display_port_power_domain(intel_encoder);
3839
	power_domain = intel_display_port_power_domain(intel_encoder);
3836
	intel_display_power_get(dev_priv, power_domain);
3840
	intel_display_power_get(dev_priv, power_domain);
3837
 
3841
 
3838
	edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3842
	edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3839
	if (edid) {
3843
	if (edid) {
3840
		has_audio = drm_detect_monitor_audio(edid);
3844
		has_audio = drm_detect_monitor_audio(edid);
3841
		kfree(edid);
3845
		kfree(edid);
3842
	}
3846
	}
3843
 
3847
 
3844
	intel_display_power_put(dev_priv, power_domain);
3848
	intel_display_power_put(dev_priv, power_domain);
3845
 
3849
 
3846
	return has_audio;
3850
	return has_audio;
3847
}
3851
}
3848
 
3852
 
3849
static int
3853
static int
3850
intel_dp_set_property(struct drm_connector *connector,
3854
intel_dp_set_property(struct drm_connector *connector,
3851
		      struct drm_property *property,
3855
		      struct drm_property *property,
3852
		      uint64_t val)
3856
		      uint64_t val)
3853
{
3857
{
3854
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
3858
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
3855
	struct intel_connector *intel_connector = to_intel_connector(connector);
3859
	struct intel_connector *intel_connector = to_intel_connector(connector);
3856
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
3860
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
3857
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3861
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3858
	int ret;
3862
	int ret;
3859
 
3863
 
3860
	ret = drm_object_property_set_value(&connector->base, property, val);
3864
	ret = drm_object_property_set_value(&connector->base, property, val);
3861
	if (ret)
3865
	if (ret)
3862
		return ret;
3866
		return ret;
3863
 
3867
 
3864
	if (property == dev_priv->force_audio_property) {
3868
	if (property == dev_priv->force_audio_property) {
3865
		int i = val;
3869
		int i = val;
3866
		bool has_audio;
3870
		bool has_audio;
3867
 
3871
 
3868
		if (i == intel_dp->force_audio)
3872
		if (i == intel_dp->force_audio)
3869
			return 0;
3873
			return 0;
3870
 
3874
 
3871
		intel_dp->force_audio = i;
3875
		intel_dp->force_audio = i;
3872
 
3876
 
3873
		if (i == HDMI_AUDIO_AUTO)
3877
		if (i == HDMI_AUDIO_AUTO)
3874
			has_audio = intel_dp_detect_audio(connector);
3878
			has_audio = intel_dp_detect_audio(connector);
3875
		else
3879
		else
3876
			has_audio = (i == HDMI_AUDIO_ON);
3880
			has_audio = (i == HDMI_AUDIO_ON);
3877
 
3881
 
3878
		if (has_audio == intel_dp->has_audio)
3882
		if (has_audio == intel_dp->has_audio)
3879
			return 0;
3883
			return 0;
3880
 
3884
 
3881
		intel_dp->has_audio = has_audio;
3885
		intel_dp->has_audio = has_audio;
3882
		goto done;
3886
		goto done;
3883
	}
3887
	}
3884
 
3888
 
3885
	if (property == dev_priv->broadcast_rgb_property) {
3889
	if (property == dev_priv->broadcast_rgb_property) {
3886
		bool old_auto = intel_dp->color_range_auto;
3890
		bool old_auto = intel_dp->color_range_auto;
3887
		uint32_t old_range = intel_dp->color_range;
3891
		uint32_t old_range = intel_dp->color_range;
3888
 
3892
 
3889
		switch (val) {
3893
		switch (val) {
3890
		case INTEL_BROADCAST_RGB_AUTO:
3894
		case INTEL_BROADCAST_RGB_AUTO:
3891
			intel_dp->color_range_auto = true;
3895
			intel_dp->color_range_auto = true;
3892
			break;
3896
			break;
3893
		case INTEL_BROADCAST_RGB_FULL:
3897
		case INTEL_BROADCAST_RGB_FULL:
3894
			intel_dp->color_range_auto = false;
3898
			intel_dp->color_range_auto = false;
3895
			intel_dp->color_range = 0;
3899
			intel_dp->color_range = 0;
3896
			break;
3900
			break;
3897
		case INTEL_BROADCAST_RGB_LIMITED:
3901
		case INTEL_BROADCAST_RGB_LIMITED:
3898
			intel_dp->color_range_auto = false;
3902
			intel_dp->color_range_auto = false;
3899
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
3903
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
3900
			break;
3904
			break;
3901
		default:
3905
		default:
3902
			return -EINVAL;
3906
			return -EINVAL;
3903
		}
3907
		}
3904
 
3908
 
3905
		if (old_auto == intel_dp->color_range_auto &&
3909
		if (old_auto == intel_dp->color_range_auto &&
3906
		    old_range == intel_dp->color_range)
3910
		    old_range == intel_dp->color_range)
3907
			return 0;
3911
			return 0;
3908
 
3912
 
3909
	goto done;
3913
	goto done;
3910
	}
3914
	}
3911
 
3915
 
3912
	if (is_edp(intel_dp) &&
3916
	if (is_edp(intel_dp) &&
3913
	    property == connector->dev->mode_config.scaling_mode_property) {
3917
	    property == connector->dev->mode_config.scaling_mode_property) {
3914
		if (val == DRM_MODE_SCALE_NONE) {
3918
		if (val == DRM_MODE_SCALE_NONE) {
3915
			DRM_DEBUG_KMS("no scaling not supported\n");
3919
			DRM_DEBUG_KMS("no scaling not supported\n");
3916
			return -EINVAL;
3920
			return -EINVAL;
3917
		}
3921
		}
3918
 
3922
 
3919
		if (intel_connector->panel.fitting_mode == val) {
3923
		if (intel_connector->panel.fitting_mode == val) {
3920
			/* the eDP scaling property is not changed */
3924
			/* the eDP scaling property is not changed */
3921
			return 0;
3925
			return 0;
3922
		}
3926
		}
3923
		intel_connector->panel.fitting_mode = val;
3927
		intel_connector->panel.fitting_mode = val;
3924
 
3928
 
3925
		goto done;
3929
		goto done;
3926
	}
3930
	}
3927
 
3931
 
3928
	return -EINVAL;
3932
	return -EINVAL;
3929
 
3933
 
3930
done:
3934
done:
3931
	if (intel_encoder->base.crtc)
3935
	if (intel_encoder->base.crtc)
3932
		intel_crtc_restore_mode(intel_encoder->base.crtc);
3936
		intel_crtc_restore_mode(intel_encoder->base.crtc);
3933
 
3937
 
3934
	return 0;
3938
	return 0;
3935
}
3939
}
3936
 
3940
 
3937
static void
3941
static void
3938
intel_dp_connector_destroy(struct drm_connector *connector)
3942
intel_dp_connector_destroy(struct drm_connector *connector)
3939
{
3943
{
3940
	struct intel_connector *intel_connector = to_intel_connector(connector);
3944
	struct intel_connector *intel_connector = to_intel_connector(connector);
3941
 
3945
 
3942
	if (!IS_ERR_OR_NULL(intel_connector->edid))
3946
	if (!IS_ERR_OR_NULL(intel_connector->edid))
3943
		kfree(intel_connector->edid);
3947
		kfree(intel_connector->edid);
3944
 
3948
 
3945
	/* Can't call is_edp() since the encoder may have been destroyed
3949
	/* Can't call is_edp() since the encoder may have been destroyed
3946
	 * already. */
3950
	 * already. */
3947
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3951
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3948
		intel_panel_fini(&intel_connector->panel);
3952
		intel_panel_fini(&intel_connector->panel);
3949
 
3953
 
3950
	drm_connector_cleanup(connector);
3954
	drm_connector_cleanup(connector);
3951
	kfree(connector);
3955
	kfree(connector);
3952
}
3956
}
3953
 
3957
 
3954
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
3958
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
3955
{
3959
{
3956
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
3960
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
3957
	struct intel_dp *intel_dp = &intel_dig_port->dp;
3961
	struct intel_dp *intel_dp = &intel_dig_port->dp;
3958
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3962
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
3959
 
3963
 
3960
	drm_dp_aux_unregister(&intel_dp->aux);
3964
	drm_dp_aux_unregister(&intel_dp->aux);
3961
	intel_dp_mst_encoder_cleanup(intel_dig_port);
3965
	intel_dp_mst_encoder_cleanup(intel_dig_port);
3962
	drm_encoder_cleanup(encoder);
3966
	drm_encoder_cleanup(encoder);
3963
	if (is_edp(intel_dp)) {
3967
	if (is_edp(intel_dp)) {
3964
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3968
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3965
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
3969
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
3966
		edp_panel_vdd_off_sync(intel_dp);
3970
		edp_panel_vdd_off_sync(intel_dp);
3967
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
3971
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
3968
	}
3972
	}
3969
	kfree(intel_dig_port);
3973
	kfree(intel_dig_port);
3970
}
3974
}
3971
 
3975
 
3972
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
3976
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
3973
{
3977
{
3974
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3978
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3975
 
3979
 
3976
	if (!is_edp(intel_dp))
3980
	if (!is_edp(intel_dp))
3977
		return;
3981
		return;
3978
 
3982
 
3979
	edp_panel_vdd_off_sync(intel_dp);
3983
	edp_panel_vdd_off_sync(intel_dp);
3980
}
3984
}
3981
 
3985
 
3982
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
3986
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
3983
{
3987
{
3984
	intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
3988
	intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
3985
}
3989
}
3986
 
3990
 
3987
static const struct drm_connector_funcs intel_dp_connector_funcs = {
3991
static const struct drm_connector_funcs intel_dp_connector_funcs = {
3988
	.dpms = intel_connector_dpms,
3992
	.dpms = intel_connector_dpms,
3989
	.detect = intel_dp_detect,
3993
	.detect = intel_dp_detect,
3990
	.fill_modes = drm_helper_probe_single_connector_modes,
3994
	.fill_modes = drm_helper_probe_single_connector_modes,
3991
	.set_property = intel_dp_set_property,
3995
	.set_property = intel_dp_set_property,
3992
	.destroy = intel_dp_connector_destroy,
3996
	.destroy = intel_dp_connector_destroy,
3993
};
3997
};
3994
 
3998
 
3995
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
3999
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
3996
	.get_modes = intel_dp_get_modes,
4000
	.get_modes = intel_dp_get_modes,
3997
	.mode_valid = intel_dp_mode_valid,
4001
	.mode_valid = intel_dp_mode_valid,
3998
	.best_encoder = intel_best_encoder,
4002
	.best_encoder = intel_best_encoder,
3999
};
4003
};
4000
 
4004
 
4001
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4005
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4002
	.reset = intel_dp_encoder_reset,
4006
	.reset = intel_dp_encoder_reset,
4003
	.destroy = intel_dp_encoder_destroy,
4007
	.destroy = intel_dp_encoder_destroy,
4004
};
4008
};
4005
 
4009
 
4006
void
4010
void
4007
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4011
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
4008
{
4012
{
4009
	return;
4013
	return;
4010
}
4014
}
4011
 
4015
 
4012
bool
4016
bool
4013
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4017
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4014
{
4018
{
4015
	struct intel_dp *intel_dp = &intel_dig_port->dp;
4019
	struct intel_dp *intel_dp = &intel_dig_port->dp;
4016
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4020
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4017
	struct drm_device *dev = intel_dig_port->base.base.dev;
4021
	struct drm_device *dev = intel_dig_port->base.base.dev;
4018
	struct drm_i915_private *dev_priv = dev->dev_private;
4022
	struct drm_i915_private *dev_priv = dev->dev_private;
4019
	enum intel_display_power_domain power_domain;
4023
	enum intel_display_power_domain power_domain;
4020
	bool ret = true;
4024
	bool ret = true;
4021
 
4025
 
4022
	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4026
	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4023
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4027
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4024
 
4028
 
4025
	DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
4029
	DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
4026
		      long_hpd ? "long" : "short");
4030
		      long_hpd ? "long" : "short");
4027
 
4031
 
4028
	power_domain = intel_display_port_power_domain(intel_encoder);
4032
	power_domain = intel_display_port_power_domain(intel_encoder);
4029
	intel_display_power_get(dev_priv, power_domain);
4033
	intel_display_power_get(dev_priv, power_domain);
4030
 
4034
 
4031
	if (long_hpd) {
4035
	if (long_hpd) {
4032
 
4036
 
4033
		if (HAS_PCH_SPLIT(dev)) {
4037
		if (HAS_PCH_SPLIT(dev)) {
4034
		if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4038
		if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4035
			goto mst_fail;
4039
			goto mst_fail;
4036
		} else {
4040
		} else {
4037
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4041
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4038
				goto mst_fail;
4042
				goto mst_fail;
4039
		}
4043
		}
4040
 
4044
 
4041
		if (!intel_dp_get_dpcd(intel_dp)) {
4045
		if (!intel_dp_get_dpcd(intel_dp)) {
4042
			goto mst_fail;
4046
			goto mst_fail;
4043
		}
4047
		}
4044
 
4048
 
4045
		intel_dp_probe_oui(intel_dp);
4049
		intel_dp_probe_oui(intel_dp);
4046
 
4050
 
4047
		if (!intel_dp_probe_mst(intel_dp))
4051
		if (!intel_dp_probe_mst(intel_dp))
4048
			goto mst_fail;
4052
			goto mst_fail;
4049
 
4053
 
4050
	} else {
4054
	} else {
4051
		if (intel_dp->is_mst) {
4055
		if (intel_dp->is_mst) {
4052
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4056
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
4053
				goto mst_fail;
4057
				goto mst_fail;
4054
		}
4058
		}
4055
 
4059
 
4056
		if (!intel_dp->is_mst) {
4060
		if (!intel_dp->is_mst) {
4057
			/*
4061
			/*
4058
			 * we'll check the link status via the normal hot plug path later -
4062
			 * we'll check the link status via the normal hot plug path later -
4059
			 * but for short hpds we should check it now
4063
			 * but for short hpds we should check it now
4060
			 */
4064
			 */
4061
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4065
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4062
			intel_dp_check_link_status(intel_dp);
4066
			intel_dp_check_link_status(intel_dp);
4063
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
4067
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
4064
		}
4068
		}
4065
	}
4069
	}
4066
	ret = false;
4070
	ret = false;
4067
	goto put_power;
4071
	goto put_power;
4068
mst_fail:
4072
mst_fail:
4069
	/* if we were in MST mode, and device is not there get out of MST mode */
4073
	/* if we were in MST mode, and device is not there get out of MST mode */
4070
	if (intel_dp->is_mst) {
4074
	if (intel_dp->is_mst) {
4071
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4075
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4072
		intel_dp->is_mst = false;
4076
		intel_dp->is_mst = false;
4073
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4077
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4074
	}
4078
	}
4075
put_power:
4079
put_power:
4076
	intel_display_power_put(dev_priv, power_domain);
4080
	intel_display_power_put(dev_priv, power_domain);
4077
 
4081
 
4078
	return ret;
4082
	return ret;
4079
}
4083
}
4080
 
4084
 
4081
/* Return which DP Port should be selected for Transcoder DP control */
4085
/* Return which DP Port should be selected for Transcoder DP control */
4082
int
4086
int
4083
intel_trans_dp_port_sel(struct drm_crtc *crtc)
4087
intel_trans_dp_port_sel(struct drm_crtc *crtc)
4084
{
4088
{
4085
	struct drm_device *dev = crtc->dev;
4089
	struct drm_device *dev = crtc->dev;
4086
	struct intel_encoder *intel_encoder;
4090
	struct intel_encoder *intel_encoder;
4087
	struct intel_dp *intel_dp;
4091
	struct intel_dp *intel_dp;
4088
 
4092
 
4089
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4093
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4090
		intel_dp = enc_to_intel_dp(&intel_encoder->base);
4094
		intel_dp = enc_to_intel_dp(&intel_encoder->base);
4091
 
4095
 
4092
		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4096
		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4093
		    intel_encoder->type == INTEL_OUTPUT_EDP)
4097
		    intel_encoder->type == INTEL_OUTPUT_EDP)
4094
			return intel_dp->output_reg;
4098
			return intel_dp->output_reg;
4095
	}
4099
	}
4096
 
4100
 
4097
	return -1;
4101
	return -1;
4098
}
4102
}
4099
 
4103
 
4100
/* check the VBT to see whether the eDP is on DP-D port */
4104
/* check the VBT to see whether the eDP is on DP-D port */
4101
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4105
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
4102
{
4106
{
4103
	struct drm_i915_private *dev_priv = dev->dev_private;
4107
	struct drm_i915_private *dev_priv = dev->dev_private;
4104
	union child_device_config *p_child;
4108
	union child_device_config *p_child;
4105
	int i;
4109
	int i;
4106
	static const short port_mapping[] = {
4110
	static const short port_mapping[] = {
4107
		[PORT_B] = PORT_IDPB,
4111
		[PORT_B] = PORT_IDPB,
4108
		[PORT_C] = PORT_IDPC,
4112
		[PORT_C] = PORT_IDPC,
4109
		[PORT_D] = PORT_IDPD,
4113
		[PORT_D] = PORT_IDPD,
4110
	};
4114
	};
4111
 
4115
 
4112
	if (port == PORT_A)
4116
	if (port == PORT_A)
4113
		return true;
4117
		return true;
4114
 
4118
 
4115
	if (!dev_priv->vbt.child_dev_num)
4119
	if (!dev_priv->vbt.child_dev_num)
4116
		return false;
4120
		return false;
4117
 
4121
 
4118
	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4122
	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4119
		p_child = dev_priv->vbt.child_dev + i;
4123
		p_child = dev_priv->vbt.child_dev + i;
4120
 
4124
 
4121
		if (p_child->common.dvo_port == port_mapping[port] &&
4125
		if (p_child->common.dvo_port == port_mapping[port] &&
4122
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4126
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4123
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4127
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
4124
			return true;
4128
			return true;
4125
	}
4129
	}
4126
	return false;
4130
	return false;
4127
}
4131
}
4128
 
4132
 
4129
void
4133
void
4130
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4134
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4131
{
4135
{
4132
	struct intel_connector *intel_connector = to_intel_connector(connector);
4136
	struct intel_connector *intel_connector = to_intel_connector(connector);
4133
 
4137
 
4134
	intel_attach_force_audio_property(connector);
4138
	intel_attach_force_audio_property(connector);
4135
	intel_attach_broadcast_rgb_property(connector);
4139
	intel_attach_broadcast_rgb_property(connector);
4136
	intel_dp->color_range_auto = true;
4140
	intel_dp->color_range_auto = true;
4137
 
4141
 
4138
	if (is_edp(intel_dp)) {
4142
	if (is_edp(intel_dp)) {
4139
		drm_mode_create_scaling_mode_property(connector->dev);
4143
		drm_mode_create_scaling_mode_property(connector->dev);
4140
		drm_object_attach_property(
4144
		drm_object_attach_property(
4141
			&connector->base,
4145
			&connector->base,
4142
			connector->dev->mode_config.scaling_mode_property,
4146
			connector->dev->mode_config.scaling_mode_property,
4143
			DRM_MODE_SCALE_ASPECT);
4147
			DRM_MODE_SCALE_ASPECT);
4144
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4148
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
4145
	}
4149
	}
4146
}
4150
}
4147
 
4151
 
4148
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4152
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4149
{
4153
{
4150
	intel_dp->last_power_cycle = jiffies;
4154
	intel_dp->last_power_cycle = jiffies;
4151
	intel_dp->last_power_on = jiffies;
4155
	intel_dp->last_power_on = jiffies;
4152
	intel_dp->last_backlight_off = jiffies;
4156
	intel_dp->last_backlight_off = jiffies;
4153
}
4157
}
4154
 
4158
 
4155
static void
4159
static void
4156
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4160
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4157
				    struct intel_dp *intel_dp,
4161
				    struct intel_dp *intel_dp,
4158
				    struct edp_power_seq *out)
4162
				    struct edp_power_seq *out)
4159
{
4163
{
4160
	struct drm_i915_private *dev_priv = dev->dev_private;
4164
	struct drm_i915_private *dev_priv = dev->dev_private;
4161
	struct edp_power_seq cur, vbt, spec, final;
4165
	struct edp_power_seq cur, vbt, spec, final;
4162
	u32 pp_on, pp_off, pp_div, pp;
4166
	u32 pp_on, pp_off, pp_div, pp;
4163
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4167
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4164
 
4168
 
4165
	if (HAS_PCH_SPLIT(dev)) {
4169
	if (HAS_PCH_SPLIT(dev)) {
4166
		pp_ctrl_reg = PCH_PP_CONTROL;
4170
		pp_ctrl_reg = PCH_PP_CONTROL;
4167
		pp_on_reg = PCH_PP_ON_DELAYS;
4171
		pp_on_reg = PCH_PP_ON_DELAYS;
4168
		pp_off_reg = PCH_PP_OFF_DELAYS;
4172
		pp_off_reg = PCH_PP_OFF_DELAYS;
4169
		pp_div_reg = PCH_PP_DIVISOR;
4173
		pp_div_reg = PCH_PP_DIVISOR;
4170
	} else {
4174
	} else {
4171
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4175
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4172
 
4176
 
4173
		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4177
		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4174
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4178
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4175
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4179
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4176
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4180
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4177
	}
4181
	}
4178
 
4182
 
4179
	/* Workaround: Need to write PP_CONTROL with the unlock key as
4183
	/* Workaround: Need to write PP_CONTROL with the unlock key as
4180
	 * the very first thing. */
4184
	 * the very first thing. */
4181
	pp = ironlake_get_pp_control(intel_dp);
4185
	pp = ironlake_get_pp_control(intel_dp);
4182
	I915_WRITE(pp_ctrl_reg, pp);
4186
	I915_WRITE(pp_ctrl_reg, pp);
4183
 
4187
 
4184
	pp_on = I915_READ(pp_on_reg);
4188
	pp_on = I915_READ(pp_on_reg);
4185
	pp_off = I915_READ(pp_off_reg);
4189
	pp_off = I915_READ(pp_off_reg);
4186
	pp_div = I915_READ(pp_div_reg);
4190
	pp_div = I915_READ(pp_div_reg);
4187
 
4191
 
4188
	/* Pull timing values out of registers */
4192
	/* Pull timing values out of registers */
4189
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4193
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4190
		PANEL_POWER_UP_DELAY_SHIFT;
4194
		PANEL_POWER_UP_DELAY_SHIFT;
4191
 
4195
 
4192
	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4196
	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4193
		PANEL_LIGHT_ON_DELAY_SHIFT;
4197
		PANEL_LIGHT_ON_DELAY_SHIFT;
4194
 
4198
 
4195
	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4199
	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4196
		PANEL_LIGHT_OFF_DELAY_SHIFT;
4200
		PANEL_LIGHT_OFF_DELAY_SHIFT;
4197
 
4201
 
4198
	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4202
	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4199
		PANEL_POWER_DOWN_DELAY_SHIFT;
4203
		PANEL_POWER_DOWN_DELAY_SHIFT;
4200
 
4204
 
4201
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4205
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4202
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4206
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4203
 
4207
 
4204
	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4208
	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4205
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4209
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4206
 
4210
 
4207
	vbt = dev_priv->vbt.edp_pps;
4211
	vbt = dev_priv->vbt.edp_pps;
4208
 
4212
 
4209
	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4213
	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4210
	 * our hw here, which are all in 100usec. */
4214
	 * our hw here, which are all in 100usec. */
4211
	spec.t1_t3 = 210 * 10;
4215
	spec.t1_t3 = 210 * 10;
4212
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4216
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4213
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4217
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4214
	spec.t10 = 500 * 10;
4218
	spec.t10 = 500 * 10;
4215
	/* This one is special and actually in units of 100ms, but zero
4219
	/* This one is special and actually in units of 100ms, but zero
4216
	 * based in the hw (so we need to add 100 ms). But the sw vbt
4220
	 * based in the hw (so we need to add 100 ms). But the sw vbt
4217
	 * table multiplies it with 1000 to make it in units of 100usec,
4221
	 * table multiplies it with 1000 to make it in units of 100usec,
4218
	 * too. */
4222
	 * too. */
4219
	spec.t11_t12 = (510 + 100) * 10;
4223
	spec.t11_t12 = (510 + 100) * 10;
4220
 
4224
 
4221
	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4225
	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4222
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4226
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4223
 
4227
 
4224
	/* Use the max of the register settings and vbt. If both are
4228
	/* Use the max of the register settings and vbt. If both are
4225
	 * unset, fall back to the spec limits. */
4229
	 * unset, fall back to the spec limits. */
4226
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
4230
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
4227
				       spec.field : \
4231
				       spec.field : \
4228
				       max(cur.field, vbt.field))
4232
				       max(cur.field, vbt.field))
4229
	assign_final(t1_t3);
4233
	assign_final(t1_t3);
4230
	assign_final(t8);
4234
	assign_final(t8);
4231
	assign_final(t9);
4235
	assign_final(t9);
4232
	assign_final(t10);
4236
	assign_final(t10);
4233
	assign_final(t11_t12);
4237
	assign_final(t11_t12);
4234
#undef assign_final
4238
#undef assign_final
4235
 
4239
 
4236
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
4240
#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
4237
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
4241
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
4238
	intel_dp->backlight_on_delay = get_delay(t8);
4242
	intel_dp->backlight_on_delay = get_delay(t8);
4239
	intel_dp->backlight_off_delay = get_delay(t9);
4243
	intel_dp->backlight_off_delay = get_delay(t9);
4240
	intel_dp->panel_power_down_delay = get_delay(t10);
4244
	intel_dp->panel_power_down_delay = get_delay(t10);
4241
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4245
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4242
#undef get_delay
4246
#undef get_delay
4243
 
4247
 
4244
	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4248
	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4245
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4249
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4246
		      intel_dp->panel_power_cycle_delay);
4250
		      intel_dp->panel_power_cycle_delay);
4247
 
4251
 
4248
	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4252
	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4249
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
4253
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
4250
 
4254
 
4251
	if (out)
4255
	if (out)
4252
		*out = final;
4256
		*out = final;
4253
}
4257
}
4254
 
4258
 
4255
static void
4259
static void
4256
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4260
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4257
					      struct intel_dp *intel_dp,
4261
					      struct intel_dp *intel_dp,
4258
					      struct edp_power_seq *seq)
4262
					      struct edp_power_seq *seq)
4259
{
4263
{
4260
	struct drm_i915_private *dev_priv = dev->dev_private;
4264
	struct drm_i915_private *dev_priv = dev->dev_private;
4261
	u32 pp_on, pp_off, pp_div, port_sel = 0;
4265
	u32 pp_on, pp_off, pp_div, port_sel = 0;
4262
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4266
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4263
	int pp_on_reg, pp_off_reg, pp_div_reg;
4267
	int pp_on_reg, pp_off_reg, pp_div_reg;
4264
 
4268
 
4265
	if (HAS_PCH_SPLIT(dev)) {
4269
	if (HAS_PCH_SPLIT(dev)) {
4266
		pp_on_reg = PCH_PP_ON_DELAYS;
4270
		pp_on_reg = PCH_PP_ON_DELAYS;
4267
		pp_off_reg = PCH_PP_OFF_DELAYS;
4271
		pp_off_reg = PCH_PP_OFF_DELAYS;
4268
		pp_div_reg = PCH_PP_DIVISOR;
4272
		pp_div_reg = PCH_PP_DIVISOR;
4269
	} else {
4273
	} else {
4270
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4274
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4271
 
4275
 
4272
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4276
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4273
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4277
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4274
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4278
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
4275
	}
4279
	}
4276
 
4280
 
4277
	/*
4281
	/*
4278
	 * And finally store the new values in the power sequencer. The
4282
	 * And finally store the new values in the power sequencer. The
4279
	 * backlight delays are set to 1 because we do manual waits on them. For
4283
	 * backlight delays are set to 1 because we do manual waits on them. For
4280
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4284
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4281
	 * we'll end up waiting for the backlight off delay twice: once when we
4285
	 * we'll end up waiting for the backlight off delay twice: once when we
4282
	 * do the manual sleep, and once when we disable the panel and wait for
4286
	 * do the manual sleep, and once when we disable the panel and wait for
4283
	 * the PP_STATUS bit to become zero.
4287
	 * the PP_STATUS bit to become zero.
4284
	 */
4288
	 */
4285
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
4289
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
4286
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4290
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4287
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
4291
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
4288
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
4292
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
4289
	/* Compute the divisor for the pp clock, simply match the Bspec
4293
	/* Compute the divisor for the pp clock, simply match the Bspec
4290
	 * formula. */
4294
	 * formula. */
4291
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
4295
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
4292
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
4296
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
4293
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);
4297
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);
4294
 
4298
 
4295
	/* Haswell doesn't have any port selection bits for the panel
4299
	/* Haswell doesn't have any port selection bits for the panel
4296
	 * power sequencer any more. */
4300
	 * power sequencer any more. */
4297
	if (IS_VALLEYVIEW(dev)) {
4301
	if (IS_VALLEYVIEW(dev)) {
4298
		if (dp_to_dig_port(intel_dp)->port == PORT_B)
4302
		if (dp_to_dig_port(intel_dp)->port == PORT_B)
4299
			port_sel = PANEL_PORT_SELECT_DPB_VLV;
4303
			port_sel = PANEL_PORT_SELECT_DPB_VLV;
4300
		else
4304
		else
4301
			port_sel = PANEL_PORT_SELECT_DPC_VLV;
4305
			port_sel = PANEL_PORT_SELECT_DPC_VLV;
4302
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
4306
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
4303
		if (dp_to_dig_port(intel_dp)->port == PORT_A)
4307
		if (dp_to_dig_port(intel_dp)->port == PORT_A)
4304
			port_sel = PANEL_PORT_SELECT_DPA;
4308
			port_sel = PANEL_PORT_SELECT_DPA;
4305
		else
4309
		else
4306
			port_sel = PANEL_PORT_SELECT_DPD;
4310
			port_sel = PANEL_PORT_SELECT_DPD;
4307
	}
4311
	}
4308
 
4312
 
4309
	pp_on |= port_sel;
4313
	pp_on |= port_sel;
4310
 
4314
 
4311
	I915_WRITE(pp_on_reg, pp_on);
4315
	I915_WRITE(pp_on_reg, pp_on);
4312
	I915_WRITE(pp_off_reg, pp_off);
4316
	I915_WRITE(pp_off_reg, pp_off);
4313
	I915_WRITE(pp_div_reg, pp_div);
4317
	I915_WRITE(pp_div_reg, pp_div);
4314
 
4318
 
4315
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
4319
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
4316
		      I915_READ(pp_on_reg),
4320
		      I915_READ(pp_on_reg),
4317
		      I915_READ(pp_off_reg),
4321
		      I915_READ(pp_off_reg),
4318
		      I915_READ(pp_div_reg));
4322
		      I915_READ(pp_div_reg));
4319
}
4323
}
4320
 
4324
 
4321
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4325
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4322
{
4326
{
4323
	struct drm_i915_private *dev_priv = dev->dev_private;
4327
	struct drm_i915_private *dev_priv = dev->dev_private;
4324
	struct intel_encoder *encoder;
4328
	struct intel_encoder *encoder;
4325
	struct intel_dp *intel_dp = NULL;
4329
	struct intel_dp *intel_dp = NULL;
4326
	struct intel_crtc_config *config = NULL;
4330
	struct intel_crtc_config *config = NULL;
4327
	struct intel_crtc *intel_crtc = NULL;
4331
	struct intel_crtc *intel_crtc = NULL;
4328
	struct intel_connector *intel_connector = dev_priv->drrs.connector;
4332
	struct intel_connector *intel_connector = dev_priv->drrs.connector;
4329
	u32 reg, val;
4333
	u32 reg, val;
4330
	enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
4334
	enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;
4331
 
4335
 
4332
	if (refresh_rate <= 0) {
4336
	if (refresh_rate <= 0) {
4333
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4337
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4334
		return;
4338
		return;
4335
	}
4339
	}
4336
 
4340
 
4337
	if (intel_connector == NULL) {
4341
	if (intel_connector == NULL) {
4338
		DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
4342
		DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
4339
		return;
4343
		return;
4340
	}
4344
	}
4341
 
4345
 
4342
	/*
4346
	/*
4343
	 * FIXME: This needs proper synchronization with psr state. But really
4347
	 * FIXME: This needs proper synchronization with psr state. But really
4344
	 * hard to tell without seeing the user of this function of this code.
4348
	 * hard to tell without seeing the user of this function of this code.
4345
	 * Check locking and ordering once that lands.
4349
	 * Check locking and ordering once that lands.
4346
	 */
4350
	 */
4347
	if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
4351
	if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
4348
		DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
4352
		DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
4349
		return;
4353
		return;
4350
	}
4354
	}
4351
 
4355
 
4352
	encoder = intel_attached_encoder(&intel_connector->base);
4356
	encoder = intel_attached_encoder(&intel_connector->base);
4353
	intel_dp = enc_to_intel_dp(&encoder->base);
4357
	intel_dp = enc_to_intel_dp(&encoder->base);
4354
	intel_crtc = encoder->new_crtc;
4358
	intel_crtc = encoder->new_crtc;
4355
 
4359
 
4356
	if (!intel_crtc) {
4360
	if (!intel_crtc) {
4357
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4361
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4358
		return;
4362
		return;
4359
	}
4363
	}
4360
 
4364
 
4361
	config = &intel_crtc->config;
4365
	config = &intel_crtc->config;
4362
 
4366
 
4363
	if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
4367
	if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
4364
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4368
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4365
		return;
4369
		return;
4366
	}
4370
	}
4367
 
4371
 
4368
	if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
4372
	if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
4369
		index = DRRS_LOW_RR;
4373
		index = DRRS_LOW_RR;
4370
 
4374
 
4371
	if (index == intel_dp->drrs_state.refresh_rate_type) {
4375
	if (index == intel_dp->drrs_state.refresh_rate_type) {
4372
		DRM_DEBUG_KMS(
4376
		DRM_DEBUG_KMS(
4373
			"DRRS requested for previously set RR...ignoring\n");
4377
			"DRRS requested for previously set RR...ignoring\n");
4374
		return;
4378
		return;
4375
	}
4379
	}
4376
 
4380
 
4377
	if (!intel_crtc->active) {
4381
	if (!intel_crtc->active) {
4378
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4382
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4379
		return;
4383
		return;
4380
	}
4384
	}
4381
 
4385
 
4382
	if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
4386
	if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
4383
		reg = PIPECONF(intel_crtc->config.cpu_transcoder);
4387
		reg = PIPECONF(intel_crtc->config.cpu_transcoder);
4384
		val = I915_READ(reg);
4388
		val = I915_READ(reg);
4385
		if (index > DRRS_HIGH_RR) {
4389
		if (index > DRRS_HIGH_RR) {
4386
			val |= PIPECONF_EDP_RR_MODE_SWITCH;
4390
			val |= PIPECONF_EDP_RR_MODE_SWITCH;
4387
			intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
4391
			intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
4388
		} else {
4392
		} else {
4389
			val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4393
			val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4390
		}
4394
		}
4391
		I915_WRITE(reg, val);
4395
		I915_WRITE(reg, val);
4392
	}
4396
	}
4393
 
4397
 
4394
	/*
4398
	/*
4395
	 * mutex taken to ensure that there is no race between differnt
4399
	 * mutex taken to ensure that there is no race between differnt
4396
	 * drrs calls trying to update refresh rate. This scenario may occur
4400
	 * drrs calls trying to update refresh rate. This scenario may occur
4397
	 * in future when idleness detection based DRRS in kernel and
4401
	 * in future when idleness detection based DRRS in kernel and
4398
	 * possible calls from user space to set differnt RR are made.
4402
	 * possible calls from user space to set differnt RR are made.
4399
	 */
4403
	 */
4400
 
4404
 
4401
	mutex_lock(&intel_dp->drrs_state.mutex);
4405
	mutex_lock(&intel_dp->drrs_state.mutex);
4402
 
4406
 
4403
	intel_dp->drrs_state.refresh_rate_type = index;
4407
	intel_dp->drrs_state.refresh_rate_type = index;
4404
 
4408
 
4405
	mutex_unlock(&intel_dp->drrs_state.mutex);
4409
	mutex_unlock(&intel_dp->drrs_state.mutex);
4406
 
4410
 
4407
	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
4411
	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
4408
}
4412
}
4409
 
4413
 
4410
static struct drm_display_mode *
4414
static struct drm_display_mode *
4411
intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4415
intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4412
			struct intel_connector *intel_connector,
4416
			struct intel_connector *intel_connector,
4413
			struct drm_display_mode *fixed_mode)
4417
			struct drm_display_mode *fixed_mode)
4414
{
4418
{
4415
	struct drm_connector *connector = &intel_connector->base;
4419
	struct drm_connector *connector = &intel_connector->base;
4416
	struct intel_dp *intel_dp = &intel_dig_port->dp;
4420
	struct intel_dp *intel_dp = &intel_dig_port->dp;
4417
	struct drm_device *dev = intel_dig_port->base.base.dev;
4421
	struct drm_device *dev = intel_dig_port->base.base.dev;
4418
	struct drm_i915_private *dev_priv = dev->dev_private;
4422
	struct drm_i915_private *dev_priv = dev->dev_private;
4419
	struct drm_display_mode *downclock_mode = NULL;
4423
	struct drm_display_mode *downclock_mode = NULL;
4420
 
4424
 
4421
	if (INTEL_INFO(dev)->gen <= 6) {
4425
	if (INTEL_INFO(dev)->gen <= 6) {
4422
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
4426
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
4423
		return NULL;
4427
		return NULL;
4424
	}
4428
	}
4425
 
4429
 
4426
	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4430
	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4427
		DRM_INFO("VBT doesn't support DRRS\n");
4431
		DRM_INFO("VBT doesn't support DRRS\n");
4428
		return NULL;
4432
		return NULL;
4429
	}
4433
	}
4430
 
4434
 
4431
	downclock_mode = intel_find_panel_downclock
4435
	downclock_mode = intel_find_panel_downclock
4432
					(dev, fixed_mode, connector);
4436
					(dev, fixed_mode, connector);
4433
 
4437
 
4434
	if (!downclock_mode) {
4438
	if (!downclock_mode) {
4435
		DRM_INFO("DRRS not supported\n");
4439
		DRM_INFO("DRRS not supported\n");
4436
		return NULL;
4440
		return NULL;
4437
	}
4441
	}
4438
 
4442
 
4439
	dev_priv->drrs.connector = intel_connector;
4443
	dev_priv->drrs.connector = intel_connector;
4440
 
4444
 
4441
	mutex_init(&intel_dp->drrs_state.mutex);
4445
	mutex_init(&intel_dp->drrs_state.mutex);
4442
 
4446
 
4443
	intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
4447
	intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
4444
 
4448
 
4445
	intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
4449
	intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
4446
	DRM_INFO("seamless DRRS supported for eDP panel.\n");
4450
	DRM_INFO("seamless DRRS supported for eDP panel.\n");
4447
	return downclock_mode;
4451
	return downclock_mode;
4448
}
4452
}
4449
 
4453
 
4450
void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
4454
void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
4451
{
4455
{
4452
	struct drm_device *dev = intel_encoder->base.dev;
4456
	struct drm_device *dev = intel_encoder->base.dev;
4453
	struct drm_i915_private *dev_priv = dev->dev_private;
4457
	struct drm_i915_private *dev_priv = dev->dev_private;
4454
	struct intel_dp *intel_dp;
4458
	struct intel_dp *intel_dp;
4455
	enum intel_display_power_domain power_domain;
4459
	enum intel_display_power_domain power_domain;
4456
 
4460
 
4457
	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4461
	if (intel_encoder->type != INTEL_OUTPUT_EDP)
4458
		return;
4462
		return;
4459
 
4463
 
4460
	intel_dp = enc_to_intel_dp(&intel_encoder->base);
4464
	intel_dp = enc_to_intel_dp(&intel_encoder->base);
4461
	if (!edp_have_panel_vdd(intel_dp))
4465
	if (!edp_have_panel_vdd(intel_dp))
4462
		return;
4466
		return;
4463
	/*
4467
	/*
4464
	 * The VDD bit needs a power domain reference, so if the bit is
4468
	 * The VDD bit needs a power domain reference, so if the bit is
4465
	 * already enabled when we boot or resume, grab this reference and
4469
	 * already enabled when we boot or resume, grab this reference and
4466
	 * schedule a vdd off, so we don't hold on to the reference
4470
	 * schedule a vdd off, so we don't hold on to the reference
4467
	 * indefinitely.
4471
	 * indefinitely.
4468
	 */
4472
	 */
4469
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4473
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4470
	power_domain = intel_display_port_power_domain(intel_encoder);
4474
	power_domain = intel_display_port_power_domain(intel_encoder);
4471
	intel_display_power_get(dev_priv, power_domain);
4475
	intel_display_power_get(dev_priv, power_domain);
4472
 
4476
 
4473
	edp_panel_vdd_schedule_off(intel_dp);
4477
	edp_panel_vdd_schedule_off(intel_dp);
4474
}
4478
}
4475
 
4479
 
4476
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4480
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4477
				     struct intel_connector *intel_connector,
4481
				     struct intel_connector *intel_connector,
4478
				     struct edp_power_seq *power_seq)
4482
				     struct edp_power_seq *power_seq)
4479
{
4483
{
4480
	struct drm_connector *connector = &intel_connector->base;
4484
	struct drm_connector *connector = &intel_connector->base;
4481
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4485
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4482
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4486
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4483
	struct drm_device *dev = intel_encoder->base.dev;
4487
	struct drm_device *dev = intel_encoder->base.dev;
4484
	struct drm_i915_private *dev_priv = dev->dev_private;
4488
	struct drm_i915_private *dev_priv = dev->dev_private;
4485
	struct drm_display_mode *fixed_mode = NULL;
4489
	struct drm_display_mode *fixed_mode = NULL;
4486
	struct drm_display_mode *downclock_mode = NULL;
4490
	struct drm_display_mode *downclock_mode = NULL;
4487
	bool has_dpcd;
4491
	bool has_dpcd;
4488
	struct drm_display_mode *scan;
4492
	struct drm_display_mode *scan;
4489
	struct edid *edid;
4493
	struct edid *edid;
4490
 
4494
 
4491
	intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
4495
	intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
4492
 
4496
 
4493
	if (!is_edp(intel_dp))
4497
	if (!is_edp(intel_dp))
4494
		return true;
4498
		return true;
4495
 
4499
 
4496
	intel_edp_panel_vdd_sanitize(intel_encoder);
4500
	intel_edp_panel_vdd_sanitize(intel_encoder);
4497
 
4501
 
4498
	/* Cache DPCD and EDID for edp. */
4502
	/* Cache DPCD and EDID for edp. */
4499
	intel_edp_panel_vdd_on(intel_dp);
4503
	intel_edp_panel_vdd_on(intel_dp);
4500
	has_dpcd = intel_dp_get_dpcd(intel_dp);
4504
	has_dpcd = intel_dp_get_dpcd(intel_dp);
4501
	edp_panel_vdd_off(intel_dp, false);
4505
	edp_panel_vdd_off(intel_dp, false);
4502
 
4506
 
4503
	if (has_dpcd) {
4507
	if (has_dpcd) {
4504
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4508
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4505
			dev_priv->no_aux_handshake =
4509
			dev_priv->no_aux_handshake =
4506
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4510
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4507
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4511
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
4508
	} else {
4512
	} else {
4509
		/* if this fails, presume the device is a ghost */
4513
		/* if this fails, presume the device is a ghost */
4510
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
4514
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
4511
		return false;
4515
		return false;
4512
	}
4516
	}
4513
 
4517
 
4514
	/* We now know it's not a ghost, init power sequence regs. */
4518
	/* We now know it's not a ghost, init power sequence regs. */
4515
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
4519
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
4516
 
4520
 
4517
	mutex_lock(&dev->mode_config.mutex);
4521
	mutex_lock(&dev->mode_config.mutex);
4518
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
4522
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
4519
	if (edid) {
4523
	if (edid) {
4520
		if (drm_add_edid_modes(connector, edid)) {
4524
		if (drm_add_edid_modes(connector, edid)) {
4521
			drm_mode_connector_update_edid_property(connector,
4525
			drm_mode_connector_update_edid_property(connector,
4522
								edid);
4526
								edid);
4523
			drm_edid_to_eld(connector, edid);
4527
			drm_edid_to_eld(connector, edid);
4524
		} else {
4528
		} else {
4525
			kfree(edid);
4529
			kfree(edid);
4526
			edid = ERR_PTR(-EINVAL);
4530
			edid = ERR_PTR(-EINVAL);
4527
		}
4531
		}
4528
	} else {
4532
	} else {
4529
		edid = ERR_PTR(-ENOENT);
4533
		edid = ERR_PTR(-ENOENT);
4530
	}
4534
	}
4531
	intel_connector->edid = edid;
4535
	intel_connector->edid = edid;
4532
 
4536
 
4533
	/* prefer fixed mode from EDID if available */
4537
	/* prefer fixed mode from EDID if available */
4534
	list_for_each_entry(scan, &connector->probed_modes, head) {
4538
	list_for_each_entry(scan, &connector->probed_modes, head) {
4535
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
4539
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
4536
			fixed_mode = drm_mode_duplicate(dev, scan);
4540
			fixed_mode = drm_mode_duplicate(dev, scan);
4537
			downclock_mode = intel_dp_drrs_init(
4541
			downclock_mode = intel_dp_drrs_init(
4538
						intel_dig_port,
4542
						intel_dig_port,
4539
						intel_connector, fixed_mode);
4543
						intel_connector, fixed_mode);
4540
			break;
4544
			break;
4541
		}
4545
		}
4542
	}
4546
	}
4543
 
4547
 
4544
	/* fallback to VBT if available for eDP */
4548
	/* fallback to VBT if available for eDP */
4545
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
4549
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
4546
		fixed_mode = drm_mode_duplicate(dev,
4550
		fixed_mode = drm_mode_duplicate(dev,
4547
					dev_priv->vbt.lfp_lvds_vbt_mode);
4551
					dev_priv->vbt.lfp_lvds_vbt_mode);
4548
		if (fixed_mode)
4552
		if (fixed_mode)
4549
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
4553
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
4550
	}
4554
	}
4551
	mutex_unlock(&dev->mode_config.mutex);
4555
	mutex_unlock(&dev->mode_config.mutex);
4552
 
4556
 
4553
	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
4557
	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
4554
	intel_panel_setup_backlight(connector);
4558
	intel_panel_setup_backlight(connector);
4555
 
4559
 
4556
	return true;
4560
	return true;
4557
}
4561
}
4558
 
4562
 
4559
bool
4563
bool
4560
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4564
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4561
			struct intel_connector *intel_connector)
4565
			struct intel_connector *intel_connector)
4562
{
4566
{
4563
	struct drm_connector *connector = &intel_connector->base;
4567
	struct drm_connector *connector = &intel_connector->base;
4564
	struct intel_dp *intel_dp = &intel_dig_port->dp;
4568
	struct intel_dp *intel_dp = &intel_dig_port->dp;
4565
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4569
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
4566
	struct drm_device *dev = intel_encoder->base.dev;
4570
	struct drm_device *dev = intel_encoder->base.dev;
4567
	struct drm_i915_private *dev_priv = dev->dev_private;
4571
	struct drm_i915_private *dev_priv = dev->dev_private;
4568
	enum port port = intel_dig_port->port;
4572
	enum port port = intel_dig_port->port;
4569
	struct edp_power_seq power_seq = { 0 };
4573
	struct edp_power_seq power_seq = { 0 };
4570
	int type;
4574
	int type;
4571
 
4575
 
4572
	/* intel_dp vfuncs */
4576
	/* intel_dp vfuncs */
4573
	if (IS_VALLEYVIEW(dev))
4577
	if (IS_VALLEYVIEW(dev))
4574
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
4578
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
4575
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4579
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4576
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
4580
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
4577
	else if (HAS_PCH_SPLIT(dev))
4581
	else if (HAS_PCH_SPLIT(dev))
4578
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
4582
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
4579
	else
4583
	else
4580
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
4584
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
4581
 
4585
 
4582
	intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
4586
	intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
4583
 
4587
 
4584
	/* Preserve the current hw state. */
4588
	/* Preserve the current hw state. */
4585
	intel_dp->DP = I915_READ(intel_dp->output_reg);
4589
	intel_dp->DP = I915_READ(intel_dp->output_reg);
4586
	intel_dp->attached_connector = intel_connector;
4590
	intel_dp->attached_connector = intel_connector;
4587
 
4591
 
4588
	if (intel_dp_is_edp(dev, port))
4592
	if (intel_dp_is_edp(dev, port))
4589
		type = DRM_MODE_CONNECTOR_eDP;
4593
		type = DRM_MODE_CONNECTOR_eDP;
4590
	else
4594
	else
4591
	type = DRM_MODE_CONNECTOR_DisplayPort;
4595
	type = DRM_MODE_CONNECTOR_DisplayPort;
4592
 
4596
 
4593
	/*
4597
	/*
4594
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
4598
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
4595
	 * for DP the encoder type can be set by the caller to
4599
	 * for DP the encoder type can be set by the caller to
4596
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
4600
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
4597
	 */
4601
	 */
4598
	if (type == DRM_MODE_CONNECTOR_eDP)
4602
	if (type == DRM_MODE_CONNECTOR_eDP)
4599
		intel_encoder->type = INTEL_OUTPUT_EDP;
4603
		intel_encoder->type = INTEL_OUTPUT_EDP;
4600
 
4604
 
4601
	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
4605
	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
4602
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
4606
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
4603
			port_name(port));
4607
			port_name(port));
4604
 
4608
 
4605
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
4609
	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
4606
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
4610
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
4607
 
4611
 
4608
	connector->interlace_allowed = true;
4612
	connector->interlace_allowed = true;
4609
	connector->doublescan_allowed = 0;
4613
	connector->doublescan_allowed = 0;
4610
 
4614
 
4611
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4615
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4612
			  edp_panel_vdd_work);
4616
			  edp_panel_vdd_work);
4613
 
4617
 
4614
	intel_connector_attach_encoder(intel_connector, intel_encoder);
4618
	intel_connector_attach_encoder(intel_connector, intel_encoder);
4615
	drm_connector_register(connector);
4619
	drm_connector_register(connector);
4616
 
4620
 
4617
	if (HAS_DDI(dev))
4621
	if (HAS_DDI(dev))
4618
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
4622
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
4619
	else
4623
	else
4620
	intel_connector->get_hw_state = intel_connector_get_hw_state;
4624
	intel_connector->get_hw_state = intel_connector_get_hw_state;
4621
	intel_connector->unregister = intel_dp_connector_unregister;
4625
	intel_connector->unregister = intel_dp_connector_unregister;
4622
 
4626
 
4623
	/* Set up the hotplug pin. */
4627
	/* Set up the hotplug pin. */
4624
	switch (port) {
4628
	switch (port) {
4625
	case PORT_A:
4629
	case PORT_A:
4626
		intel_encoder->hpd_pin = HPD_PORT_A;
4630
		intel_encoder->hpd_pin = HPD_PORT_A;
4627
			break;
4631
			break;
4628
	case PORT_B:
4632
	case PORT_B:
4629
		intel_encoder->hpd_pin = HPD_PORT_B;
4633
		intel_encoder->hpd_pin = HPD_PORT_B;
4630
			break;
4634
			break;
4631
	case PORT_C:
4635
	case PORT_C:
4632
		intel_encoder->hpd_pin = HPD_PORT_C;
4636
		intel_encoder->hpd_pin = HPD_PORT_C;
4633
			break;
4637
			break;
4634
	case PORT_D:
4638
	case PORT_D:
4635
		intel_encoder->hpd_pin = HPD_PORT_D;
4639
		intel_encoder->hpd_pin = HPD_PORT_D;
4636
			break;
4640
			break;
4637
	default:
4641
	default:
4638
		BUG();
4642
		BUG();
4639
	}
4643
	}
4640
 
4644
 
4641
	if (is_edp(intel_dp)) {
4645
	if (is_edp(intel_dp)) {
4642
		intel_dp_init_panel_power_timestamps(intel_dp);
4646
		intel_dp_init_panel_power_timestamps(intel_dp);
4643
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
4647
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
4644
	}
4648
	}
4645
 
4649
 
4646
	intel_dp_aux_init(intel_dp, intel_connector);
4650
	intel_dp_aux_init(intel_dp, intel_connector);
4647
 
4651
 
4648
	/* init MST on ports that can support it */
4652
	/* init MST on ports that can support it */
4649
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4653
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4650
		if (port == PORT_B || port == PORT_C || port == PORT_D) {
4654
		if (port == PORT_B || port == PORT_C || port == PORT_D) {
4651
			intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
4655
			intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
4652
		}
4656
		}
4653
	}
4657
	}
4654
 
4658
 
4655
	if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
4659
	if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
4656
		drm_dp_aux_unregister(&intel_dp->aux);
4660
		drm_dp_aux_unregister(&intel_dp->aux);
4657
	if (is_edp(intel_dp)) {
4661
	if (is_edp(intel_dp)) {
4658
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4662
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4659
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4663
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4660
			edp_panel_vdd_off_sync(intel_dp);
4664
			edp_panel_vdd_off_sync(intel_dp);
4661
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
4665
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
4662
		}
4666
		}
4663
		drm_connector_unregister(connector);
4667
		drm_connector_unregister(connector);
4664
		drm_connector_cleanup(connector);
4668
		drm_connector_cleanup(connector);
4665
		return false;
4669
		return false;
4666
	}
4670
	}
4667
 
4671
 
4668
	intel_dp_add_properties(intel_dp, connector);
4672
	intel_dp_add_properties(intel_dp, connector);
4669
 
4673
 
4670
	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
4674
	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
4671
	 * 0xd.  Failure to do so will result in spurious interrupts being
4675
	 * 0xd.  Failure to do so will result in spurious interrupts being
4672
	 * generated on the port when a cable is not attached.
4676
	 * generated on the port when a cable is not attached.
4673
	 */
4677
	 */
4674
	if (IS_G4X(dev) && !IS_GM45(dev)) {
4678
	if (IS_G4X(dev) && !IS_GM45(dev)) {
4675
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
4679
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
4676
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
4680
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
4677
	}
4681
	}
4678
 
4682
 
4679
	return true;
4683
	return true;
4680
}
4684
}
4681
 
4685
 
4682
void
4686
void
4683
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4687
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4684
{
4688
{
4685
	struct drm_i915_private *dev_priv = dev->dev_private;
4689
	struct drm_i915_private *dev_priv = dev->dev_private;
4686
	struct intel_digital_port *intel_dig_port;
4690
	struct intel_digital_port *intel_dig_port;
4687
	struct intel_encoder *intel_encoder;
4691
	struct intel_encoder *intel_encoder;
4688
	struct drm_encoder *encoder;
4692
	struct drm_encoder *encoder;
4689
	struct intel_connector *intel_connector;
4693
	struct intel_connector *intel_connector;
4690
 
4694
 
4691
	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
4695
	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
4692
	if (!intel_dig_port)
4696
	if (!intel_dig_port)
4693
		return;
4697
		return;
4694
 
4698
 
4695
	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
4699
	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
4696
	if (!intel_connector) {
4700
	if (!intel_connector) {
4697
		kfree(intel_dig_port);
4701
		kfree(intel_dig_port);
4698
		return;
4702
		return;
4699
	}
4703
	}
4700
 
4704
 
4701
	intel_encoder = &intel_dig_port->base;
4705
	intel_encoder = &intel_dig_port->base;
4702
	encoder = &intel_encoder->base;
4706
	encoder = &intel_encoder->base;
4703
 
4707
 
4704
	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
4708
	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
4705
			 DRM_MODE_ENCODER_TMDS);
4709
			 DRM_MODE_ENCODER_TMDS);
4706
 
4710
 
4707
	intel_encoder->compute_config = intel_dp_compute_config;
4711
	intel_encoder->compute_config = intel_dp_compute_config;
4708
	intel_encoder->disable = intel_disable_dp;
4712
	intel_encoder->disable = intel_disable_dp;
4709
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
4713
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
4710
	intel_encoder->get_config = intel_dp_get_config;
4714
	intel_encoder->get_config = intel_dp_get_config;
4711
	intel_encoder->suspend = intel_dp_encoder_suspend;
4715
	intel_encoder->suspend = intel_dp_encoder_suspend;
4712
	if (IS_CHERRYVIEW(dev)) {
4716
	if (IS_CHERRYVIEW(dev)) {
4713
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
4717
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
4714
		intel_encoder->pre_enable = chv_pre_enable_dp;
4718
		intel_encoder->pre_enable = chv_pre_enable_dp;
4715
		intel_encoder->enable = vlv_enable_dp;
4719
		intel_encoder->enable = vlv_enable_dp;
4716
		intel_encoder->post_disable = chv_post_disable_dp;
4720
		intel_encoder->post_disable = chv_post_disable_dp;
4717
	} else if (IS_VALLEYVIEW(dev)) {
4721
	} else if (IS_VALLEYVIEW(dev)) {
4718
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
4722
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
4719
		intel_encoder->pre_enable = vlv_pre_enable_dp;
4723
		intel_encoder->pre_enable = vlv_pre_enable_dp;
4720
		intel_encoder->enable = vlv_enable_dp;
4724
		intel_encoder->enable = vlv_enable_dp;
4721
		intel_encoder->post_disable = vlv_post_disable_dp;
4725
		intel_encoder->post_disable = vlv_post_disable_dp;
4722
	} else {
4726
	} else {
4723
		intel_encoder->pre_enable = g4x_pre_enable_dp;
4727
		intel_encoder->pre_enable = g4x_pre_enable_dp;
4724
		intel_encoder->enable = g4x_enable_dp;
4728
		intel_encoder->enable = g4x_enable_dp;
4725
		intel_encoder->post_disable = g4x_post_disable_dp;
4729
		intel_encoder->post_disable = g4x_post_disable_dp;
4726
	}
4730
	}
4727
 
4731
 
4728
	intel_dig_port->port = port;
4732
	intel_dig_port->port = port;
4729
	intel_dig_port->dp.output_reg = output_reg;
4733
	intel_dig_port->dp.output_reg = output_reg;
4730
 
4734
 
4731
	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4735
	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4732
	if (IS_CHERRYVIEW(dev)) {
4736
	if (IS_CHERRYVIEW(dev)) {
4733
		if (port == PORT_D)
4737
		if (port == PORT_D)
4734
			intel_encoder->crtc_mask = 1 << 2;
4738
			intel_encoder->crtc_mask = 1 << 2;
4735
		else
4739
		else
4736
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
4740
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
4737
	} else {
4741
	} else {
4738
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
4742
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
4739
	}
4743
	}
4740
	intel_encoder->cloneable = 0;
4744
	intel_encoder->cloneable = 0;
4741
	intel_encoder->hot_plug = intel_dp_hot_plug;
4745
	intel_encoder->hot_plug = intel_dp_hot_plug;
4742
 
4746
 
4743
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
4747
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
4744
	dev_priv->hpd_irq_port[port] = intel_dig_port;
4748
	dev_priv->hpd_irq_port[port] = intel_dig_port;
4745
 
4749
 
4746
	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
4750
	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
4747
		drm_encoder_cleanup(encoder);
4751
		drm_encoder_cleanup(encoder);
4748
		kfree(intel_dig_port);
4752
		kfree(intel_dig_port);
4749
		kfree(intel_connector);
4753
		kfree(intel_connector);
4750
	}
4754
	}
4751
}
4755
}
4752
 
4756
 
4753
void intel_dp_mst_suspend(struct drm_device *dev)
4757
void intel_dp_mst_suspend(struct drm_device *dev)
4754
{
4758
{
4755
	struct drm_i915_private *dev_priv = dev->dev_private;
4759
	struct drm_i915_private *dev_priv = dev->dev_private;
4756
	int i;
4760
	int i;
4757
 
4761
 
4758
	/* disable MST */
4762
	/* disable MST */
4759
	for (i = 0; i < I915_MAX_PORTS; i++) {
4763
	for (i = 0; i < I915_MAX_PORTS; i++) {
4760
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
4764
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
4761
		if (!intel_dig_port)
4765
		if (!intel_dig_port)
4762
			continue;
4766
			continue;
4763
 
4767
 
4764
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
4768
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
4765
			if (!intel_dig_port->dp.can_mst)
4769
			if (!intel_dig_port->dp.can_mst)
4766
				continue;
4770
				continue;
4767
			if (intel_dig_port->dp.is_mst)
4771
			if (intel_dig_port->dp.is_mst)
4768
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
4772
				drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
4769
		}
4773
		}
4770
	}
4774
	}
4771
}
4775
}
4772
 
4776
 
4773
void intel_dp_mst_resume(struct drm_device *dev)
4777
void intel_dp_mst_resume(struct drm_device *dev)
4774
{
4778
{
4775
	struct drm_i915_private *dev_priv = dev->dev_private;
4779
	struct drm_i915_private *dev_priv = dev->dev_private;
4776
	int i;
4780
	int i;
4777
 
4781
 
4778
	for (i = 0; i < I915_MAX_PORTS; i++) {
4782
	for (i = 0; i < I915_MAX_PORTS; i++) {
4779
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
4783
		struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
4780
		if (!intel_dig_port)
4784
		if (!intel_dig_port)
4781
			continue;
4785
			continue;
4782
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
4786
		if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
4783
			int ret;
4787
			int ret;
4784
 
4788
 
4785
			if (!intel_dig_port->dp.can_mst)
4789
			if (!intel_dig_port->dp.can_mst)
4786
				continue;
4790
				continue;
4787
 
4791
 
4788
			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
4792
			ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
4789
			if (ret != 0) {
4793
			if (ret != 0) {
4790
				intel_dp_check_mst_status(&intel_dig_port->dp);
4794
				intel_dp_check_mst_status(&intel_dig_port->dp);
4791
			}
4795
			}
4792
		}
4796
		}
4793
	}
4797
	}
4794
}
4798
}
4795
>
4799
>
4796
>
4800
>
4797
>
4801
>
4798
>
4802
>