Rev 3480 | Rev 3746 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 3480 | Rev 3482 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright © 2008 Intel Corporation |
2 | * Copyright © 2008 Intel Corporation |
3 | * |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
13 | * Software. |
14 | * |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
21 | * IN THE SOFTWARE. |
21 | * IN THE SOFTWARE. |
22 | * |
22 | * |
23 | * Authors: |
23 | * Authors: |
24 | * Keith Packard |
24 | * Keith Packard |
25 | * |
25 | * |
26 | */ |
26 | */ |
27 | 27 | ||
28 | #include |
28 | #include |
29 | #include |
29 | #include |
30 | #include |
30 | #include |
31 | #include |
31 | #include |
32 | #include |
32 | #include |
33 | #include |
33 | #include |
34 | #include |
34 | #include |
35 | #include "intel_drv.h" |
35 | #include "intel_drv.h" |
36 | #include |
36 | #include |
37 | #include "i915_drv.h" |
37 | #include "i915_drv.h" |
38 | 38 | ||
39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
40 | 40 | ||
41 | /** |
41 | /** |
42 | * is_edp - is the given port attached to an eDP panel (either CPU or PCH) |
42 | * is_edp - is the given port attached to an eDP panel (either CPU or PCH) |
43 | * @intel_dp: DP struct |
43 | * @intel_dp: DP struct |
44 | * |
44 | * |
45 | * If a CPU or PCH DP output is attached to an eDP panel, this function |
45 | * If a CPU or PCH DP output is attached to an eDP panel, this function |
46 | * will return true, and false otherwise. |
46 | * will return true, and false otherwise. |
47 | */ |
47 | */ |
48 | static bool is_edp(struct intel_dp *intel_dp) |
48 | static bool is_edp(struct intel_dp *intel_dp) |
49 | { |
49 | { |
50 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
50 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
51 | 51 | ||
52 | return intel_dig_port->base.type == INTEL_OUTPUT_EDP; |
52 | return intel_dig_port->base.type == INTEL_OUTPUT_EDP; |
53 | } |
53 | } |
54 | 54 | ||
55 | /** |
55 | /** |
56 | * is_pch_edp - is the port on the PCH and attached to an eDP panel? |
56 | * is_pch_edp - is the port on the PCH and attached to an eDP panel? |
57 | * @intel_dp: DP struct |
57 | * @intel_dp: DP struct |
58 | * |
58 | * |
59 | * Returns true if the given DP struct corresponds to a PCH DP port attached |
59 | * Returns true if the given DP struct corresponds to a PCH DP port attached |
60 | * to an eDP panel, false otherwise. Helpful for determining whether we |
60 | * to an eDP panel, false otherwise. Helpful for determining whether we |
61 | * may need FDI resources for a given DP output or not. |
61 | * may need FDI resources for a given DP output or not. |
62 | */ |
62 | */ |
static bool is_pch_edp(struct intel_dp *intel_dp)
{
	/* Flag is presumably set during port init (not visible in this
	 * chunk) — this is a plain cached-flag lookup. */
	return intel_dp->is_pch_edp;
}
67 | 67 | ||
68 | /** |
68 | /** |
69 | * is_cpu_edp - is the port on the CPU and attached to an eDP panel? |
69 | * is_cpu_edp - is the port on the CPU and attached to an eDP panel? |
70 | * @intel_dp: DP struct |
70 | * @intel_dp: DP struct |
71 | * |
71 | * |
72 | * Returns true if the given DP struct corresponds to a CPU eDP port. |
72 | * Returns true if the given DP struct corresponds to a CPU eDP port. |
73 | */ |
73 | */ |
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
	/* CPU eDP is any eDP port that is not the PCH-attached one. */
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}
78 | 78 | ||
79 | static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) |
79 | static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp) |
80 | { |
80 | { |
81 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
81 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
82 | 82 | ||
83 | return intel_dig_port->base.base.dev; |
83 | return intel_dig_port->base.base.dev; |
84 | } |
84 | } |
85 | 85 | ||
/* Look up the intel_dp behind the encoder attached to @connector. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
90 | 90 | ||
91 | /** |
91 | /** |
92 | * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? |
92 | * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? |
93 | * @encoder: DRM encoder |
93 | * @encoder: DRM encoder |
94 | * |
94 | * |
95 | * Return true if @encoder corresponds to a PCH attached eDP panel. Needed |
95 | * Return true if @encoder corresponds to a PCH attached eDP panel. Needed |
96 | * by intel_display.c. |
96 | * by intel_display.c. |
97 | */ |
97 | */ |
98 | bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) |
98 | bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) |
99 | { |
99 | { |
100 | struct intel_dp *intel_dp; |
100 | struct intel_dp *intel_dp; |
101 | 101 | ||
102 | if (!encoder) |
102 | if (!encoder) |
103 | return false; |
103 | return false; |
104 | 104 | ||
105 | intel_dp = enc_to_intel_dp(encoder); |
105 | intel_dp = enc_to_intel_dp(encoder); |
106 | 106 | ||
107 | return is_pch_edp(intel_dp); |
107 | return is_pch_edp(intel_dp); |
108 | } |
108 | } |
109 | 109 | ||
110 | static void intel_dp_link_down(struct intel_dp *intel_dp); |
110 | static void intel_dp_link_down(struct intel_dp *intel_dp); |
111 | 111 | ||
/* Report the currently-configured eDP lane count and link rate (in the
 * units drm_dp_bw_code_to_link_rate() returns) through the out-params. */
void
intel_edp_link_config(struct intel_encoder *intel_encoder,
		      int *lane_num, int *link_bw)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	*lane_num = intel_dp->lane_count;
	*link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
}
121 | 121 | ||
122 | int |
122 | int |
123 | intel_edp_target_clock(struct intel_encoder *intel_encoder, |
123 | intel_edp_target_clock(struct intel_encoder *intel_encoder, |
124 | struct drm_display_mode *mode) |
124 | struct drm_display_mode *mode) |
125 | { |
125 | { |
126 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
126 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
127 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
127 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
128 | 128 | ||
129 | if (intel_connector->panel.fixed_mode) |
129 | if (intel_connector->panel.fixed_mode) |
130 | return intel_connector->panel.fixed_mode->clock; |
130 | return intel_connector->panel.fixed_mode->clock; |
131 | else |
131 | else |
132 | return mode->clock; |
132 | return mode->clock; |
133 | } |
133 | } |
134 | 134 | ||
135 | static int |
135 | static int |
136 | intel_dp_max_link_bw(struct intel_dp *intel_dp) |
136 | intel_dp_max_link_bw(struct intel_dp *intel_dp) |
137 | { |
137 | { |
138 | int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; |
138 | int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; |
139 | 139 | ||
140 | switch (max_link_bw) { |
140 | switch (max_link_bw) { |
141 | case DP_LINK_BW_1_62: |
141 | case DP_LINK_BW_1_62: |
142 | case DP_LINK_BW_2_7: |
142 | case DP_LINK_BW_2_7: |
143 | break; |
143 | break; |
144 | default: |
144 | default: |
145 | max_link_bw = DP_LINK_BW_1_62; |
145 | max_link_bw = DP_LINK_BW_1_62; |
146 | break; |
146 | break; |
147 | } |
147 | } |
148 | return max_link_bw; |
148 | return max_link_bw; |
149 | } |
149 | } |
150 | 150 | ||
151 | /* |
151 | /* |
152 | * The units on the numbers in the next two are... bizarre. Examples will |
152 | * The units on the numbers in the next two are... bizarre. Examples will |
153 | * make it clearer; this one parallels an example in the eDP spec. |
153 | * make it clearer; this one parallels an example in the eDP spec. |
154 | * |
154 | * |
155 | * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: |
155 | * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: |
156 | * |
156 | * |
157 | * 270000 * 1 * 8 / 10 == 216000 |
157 | * 270000 * 1 * 8 / 10 == 216000 |
158 | * |
158 | * |
159 | * The actual data capacity of that configuration is 2.16Gbit/s, so the |
159 | * The actual data capacity of that configuration is 2.16Gbit/s, so the |
160 | * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - |
160 | * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - |
161 | * or equivalently, kilopixels per second - so for 1680x1050R it'd be |
161 | * or equivalently, kilopixels per second - so for 1680x1050R it'd be |
162 | * 119000. At 18bpp that's 2142000 kilobits per second. |
162 | * 119000. At 18bpp that's 2142000 kilobits per second. |
163 | * |
163 | * |
164 | * Thus the strange-looking division by 10 in intel_dp_link_required, to |
164 | * Thus the strange-looking division by 10 in intel_dp_link_required, to |
165 | * get the result in decakilobits instead of kilobits. |
165 | * get the result in decakilobits instead of kilobits. |
166 | */ |
166 | */ |
167 | 167 | ||
/* Bandwidth a mode needs, in decakilobits/s (see the unit discussion
 * above): kilobits/s = pixel_clock (kHz) * bpp, then round up by 10. */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
173 | 173 | ||
/* Usable link capacity in decakilobits/s: 8b/10b coding means only 80%
 * of the raw symbol rate carries pixel data. */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw = max_link_clock * max_lanes;

	return raw * 8 / 10;
}
179 | 179 | ||
/* Decide whether @mode fits on the link at 24bpp, or — failing that —
 * at 18bpp with dithering.  Returns false if it fits at neither.  When
 * @adjust_mode is set, the 6bpc-force flag is recorded on the mode. */
static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
			  struct drm_display_mode *mode,
			  bool adjust_mode)
{
	int max_link_clock =
		drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_rate, mode_rate;

	/* First try full 24bpp against the link's capacity. */
	mode_rate = intel_dp_link_required(mode->clock, 24);
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);

	if (mode_rate > max_rate) {
		/* Too much for 24bpp; retry assuming 18bpp (6bpc). */
		mode_rate = intel_dp_link_required(mode->clock, 18);
		if (mode_rate > max_rate)
			return false;

		/* Only mutate the mode when the caller asked for it
		 * (mode_valid probes pass adjust_mode == false). */
		if (adjust_mode)
			mode->private_flags
				|= INTEL_MODE_DP_FORCE_6BPC;

		return true;
	}

	return true;
}
207 | 207 | ||
/* Connector .mode_valid hook: reject modes an eDP panel can't show or
 * that the DP link can't carry. */
static int
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;

	/* eDP panels can't scale up beyond their fixed native mode. */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;
	}

	/* Doesn't fit on the link even with 6bpc dithering (probe only —
	 * adjust_mode is false, so the mode is not modified here). */
	if (!intel_dp_adjust_dithering(intel_dp, mode, false))
		return MODE_CLOCK_HIGH;

	/* 10 MHz lower bound on the pixel clock. */
	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* Double-clocked modes are not supported on DP. */
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
235 | 235 | ||
/* Pack up to four bytes, most-significant byte first, into one 32-bit
 * AUX channel data word; extra input bytes beyond four are ignored. */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int count = src_bytes > 4 ? 4 : src_bytes;
	uint32_t word = 0;
	int byte;

	for (byte = 0; byte < count; byte++)
		word |= (uint32_t)src[byte] << (24 - byte * 8);

	return word;
}
248 | 248 | ||
/* Unpack a 32-bit AUX data word into up to four bytes, MSB first;
 * at most four destination bytes are ever written. */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int count = dst_bytes > 4 ? 4 : dst_bytes;
	int byte;

	for (byte = 0; byte < count; byte++)
		dst[byte] = (uint8_t)(src >> (24 - byte * 8));
}
258 | 258 | ||
259 | /* hrawclock is 1/4 the FSB frequency */ |
259 | /* hrawclock is 1/4 the FSB frequency */ |
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	/* Decode the FSB-frequency field of CLKCFG into MHz; returned
	 * value is hrawclk = FSB / 4. */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		/* Unknown encoding: fall back to the 533MHz-FSB value. */
		return 133;
	}
}
292 | 292 | ||
/* True when the PCH panel-power sequencer reports the eDP panel on. */
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	/* dev_priv is referenced implicitly by the I915_READ() macro. */
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
}
300 | 300 | ||
/* True when panel VDD is being forced on via PCH_PP_CONTROL. */
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	/* dev_priv is referenced implicitly by the I915_READ() macro. */
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
}
308 | 308 | ||
/* Sanity check before AUX traffic: warn if an eDP panel has neither
 * panel power nor forced VDD, since AUX needs one of them. */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	/* dev_priv is referenced implicitly by the I915_READ() macro. */
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Non-eDP ports don't depend on panel power for AUX. */
	if (!is_edp(intel_dp))
		return;
	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
}
324 | 324 | ||
/* Wait (irq-driven or polling) for the in-flight AUX transfer to finish,
 * i.e. for SEND_BUSY to clear in the channel control register; returns
 * the last value read from that register, status bits included. */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Default: AUX control reg sits at output_reg + 0x10. */
	uint32_t ch_ctl = intel_dp->output_reg + 0x10;
	uint32_t status;
	bool done;

	/* Haswell uses fixed per-port AUX control registers instead. */
	if (IS_HASWELL(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			ch_ctl = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			ch_ctl = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			ch_ctl = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			ch_ctl = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

	/* C evaluates true once SEND_BUSY has cleared; as a side effect it
	 * latches the register value into 'status'. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
367 | 367 | ||
368 | static int |
368 | static int |
369 | intel_dp_aux_ch(struct intel_dp *intel_dp, |
369 | intel_dp_aux_ch(struct intel_dp *intel_dp, |
370 | uint8_t *send, int send_bytes, |
370 | uint8_t *send, int send_bytes, |
371 | uint8_t *recv, int recv_size) |
371 | uint8_t *recv, int recv_size) |
372 | { |
372 | { |
373 | uint32_t output_reg = intel_dp->output_reg; |
373 | uint32_t output_reg = intel_dp->output_reg; |
374 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
374 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
375 | struct drm_device *dev = intel_dig_port->base.base.dev; |
375 | struct drm_device *dev = intel_dig_port->base.base.dev; |
376 | struct drm_i915_private *dev_priv = dev->dev_private; |
376 | struct drm_i915_private *dev_priv = dev->dev_private; |
377 | uint32_t ch_ctl = output_reg + 0x10; |
377 | uint32_t ch_ctl = output_reg + 0x10; |
378 | uint32_t ch_data = ch_ctl + 4; |
378 | uint32_t ch_data = ch_ctl + 4; |
379 | int i, ret, recv_bytes; |
379 | int i, ret, recv_bytes; |
380 | uint32_t status; |
380 | uint32_t status; |
381 | uint32_t aux_clock_divider; |
381 | uint32_t aux_clock_divider; |
382 | int try, precharge; |
382 | int try, precharge; |
383 | bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); |
383 | bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); |
384 | 384 | ||
385 | /* dp aux is extremely sensitive to irq latency, hence request the |
385 | /* dp aux is extremely sensitive to irq latency, hence request the |
386 | * lowest possible wakeup latency and so prevent the cpu from going into |
386 | * lowest possible wakeup latency and so prevent the cpu from going into |
387 | * deep sleep states. |
387 | * deep sleep states. |
388 | */ |
388 | */ |
389 | // pm_qos_update_request(&dev_priv->pm_qos, 0); |
389 | // pm_qos_update_request(&dev_priv->pm_qos, 0); |
390 | 390 | ||
391 | if (IS_HASWELL(dev)) { |
391 | if (IS_HASWELL(dev)) { |
392 | switch (intel_dig_port->port) { |
392 | switch (intel_dig_port->port) { |
393 | case PORT_A: |
393 | case PORT_A: |
394 | ch_ctl = DPA_AUX_CH_CTL; |
394 | ch_ctl = DPA_AUX_CH_CTL; |
395 | ch_data = DPA_AUX_CH_DATA1; |
395 | ch_data = DPA_AUX_CH_DATA1; |
396 | break; |
396 | break; |
397 | case PORT_B: |
397 | case PORT_B: |
398 | ch_ctl = PCH_DPB_AUX_CH_CTL; |
398 | ch_ctl = PCH_DPB_AUX_CH_CTL; |
399 | ch_data = PCH_DPB_AUX_CH_DATA1; |
399 | ch_data = PCH_DPB_AUX_CH_DATA1; |
400 | break; |
400 | break; |
401 | case PORT_C: |
401 | case PORT_C: |
402 | ch_ctl = PCH_DPC_AUX_CH_CTL; |
402 | ch_ctl = PCH_DPC_AUX_CH_CTL; |
403 | ch_data = PCH_DPC_AUX_CH_DATA1; |
403 | ch_data = PCH_DPC_AUX_CH_DATA1; |
404 | break; |
404 | break; |
405 | case PORT_D: |
405 | case PORT_D: |
406 | ch_ctl = PCH_DPD_AUX_CH_CTL; |
406 | ch_ctl = PCH_DPD_AUX_CH_CTL; |
407 | ch_data = PCH_DPD_AUX_CH_DATA1; |
407 | ch_data = PCH_DPD_AUX_CH_DATA1; |
408 | break; |
408 | break; |
409 | default: |
409 | default: |
410 | BUG(); |
410 | BUG(); |
411 | } |
411 | } |
412 | } |
412 | } |
413 | 413 | ||
414 | intel_dp_check_edp(intel_dp); |
414 | intel_dp_check_edp(intel_dp); |
415 | /* The clock divider is based off the hrawclk, |
415 | /* The clock divider is based off the hrawclk, |
416 | * and would like to run at 2MHz. So, take the |
416 | * and would like to run at 2MHz. So, take the |
417 | * hrawclk value and divide by 2 and use that |
417 | * hrawclk value and divide by 2 and use that |
418 | * |
418 | * |
419 | * Note that PCH attached eDP panels should use a 125MHz input |
419 | * Note that PCH attached eDP panels should use a 125MHz input |
420 | * clock divider. |
420 | * clock divider. |
421 | */ |
421 | */ |
422 | if (is_cpu_edp(intel_dp)) { |
422 | if (is_cpu_edp(intel_dp)) { |
423 | if (HAS_DDI(dev)) |
423 | if (HAS_DDI(dev)) |
424 | aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; |
424 | aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; |
425 | else if (IS_VALLEYVIEW(dev)) |
425 | else if (IS_VALLEYVIEW(dev)) |
426 | aux_clock_divider = 100; |
426 | aux_clock_divider = 100; |
427 | else if (IS_GEN6(dev) || IS_GEN7(dev)) |
427 | else if (IS_GEN6(dev) || IS_GEN7(dev)) |
428 | aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ |
428 | aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ |
429 | else |
429 | else |
430 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ |
430 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ |
431 | } else if (HAS_PCH_SPLIT(dev)) |
431 | } else if (HAS_PCH_SPLIT(dev)) |
432 | aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); |
432 | aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); |
433 | else |
433 | else |
434 | aux_clock_divider = intel_hrawclk(dev) / 2; |
434 | aux_clock_divider = intel_hrawclk(dev) / 2; |
435 | 435 | ||
436 | if (IS_GEN6(dev)) |
436 | if (IS_GEN6(dev)) |
437 | precharge = 3; |
437 | precharge = 3; |
438 | else |
438 | else |
439 | precharge = 5; |
439 | precharge = 5; |
440 | 440 | ||
441 | /* Try to wait for any previous AUX channel activity */ |
441 | /* Try to wait for any previous AUX channel activity */ |
442 | for (try = 0; try < 3; try++) { |
442 | for (try = 0; try < 3; try++) { |
443 | status = I915_READ_NOTRACE(ch_ctl); |
443 | status = I915_READ_NOTRACE(ch_ctl); |
444 | if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) |
444 | if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) |
445 | break; |
445 | break; |
446 | msleep(1); |
446 | msleep(1); |
447 | } |
447 | } |
448 | 448 | ||
449 | if (try == 3) { |
449 | if (try == 3) { |
450 | WARN(1, "dp_aux_ch not started status 0x%08x\n", |
450 | WARN(1, "dp_aux_ch not started status 0x%08x\n", |
451 | I915_READ(ch_ctl)); |
451 | I915_READ(ch_ctl)); |
452 | ret = -EBUSY; |
452 | ret = -EBUSY; |
453 | goto out; |
453 | goto out; |
454 | } |
454 | } |
455 | 455 | ||
456 | /* Must try at least 3 times according to DP spec */ |
456 | /* Must try at least 3 times according to DP spec */ |
457 | for (try = 0; try < 5; try++) { |
457 | for (try = 0; try < 5; try++) { |
458 | /* Load the send data into the aux channel data registers */ |
458 | /* Load the send data into the aux channel data registers */ |
459 | for (i = 0; i < send_bytes; i += 4) |
459 | for (i = 0; i < send_bytes; i += 4) |
460 | I915_WRITE(ch_data + i, |
460 | I915_WRITE(ch_data + i, |
461 | pack_aux(send + i, send_bytes - i)); |
461 | pack_aux(send + i, send_bytes - i)); |
462 | 462 | ||
463 | /* Send the command and wait for it to complete */ |
463 | /* Send the command and wait for it to complete */ |
464 | I915_WRITE(ch_ctl, |
464 | I915_WRITE(ch_ctl, |
465 | DP_AUX_CH_CTL_SEND_BUSY | |
465 | DP_AUX_CH_CTL_SEND_BUSY | |
466 | (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | |
466 | (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | |
467 | DP_AUX_CH_CTL_TIME_OUT_400us | |
467 | DP_AUX_CH_CTL_TIME_OUT_400us | |
468 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | |
468 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | |
469 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | |
469 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | |
470 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | |
470 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | |
471 | DP_AUX_CH_CTL_DONE | |
471 | DP_AUX_CH_CTL_DONE | |
472 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
472 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
473 | DP_AUX_CH_CTL_RECEIVE_ERROR); |
473 | DP_AUX_CH_CTL_RECEIVE_ERROR); |
474 | 474 | ||
475 | status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); |
475 | status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); |
476 | 476 | ||
477 | /* Clear done status and any errors */ |
477 | /* Clear done status and any errors */ |
478 | I915_WRITE(ch_ctl, |
478 | I915_WRITE(ch_ctl, |
479 | status | |
479 | status | |
480 | DP_AUX_CH_CTL_DONE | |
480 | DP_AUX_CH_CTL_DONE | |
481 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
481 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
482 | DP_AUX_CH_CTL_RECEIVE_ERROR); |
482 | DP_AUX_CH_CTL_RECEIVE_ERROR); |
483 | 483 | ||
484 | if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | |
484 | if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | |
485 | DP_AUX_CH_CTL_RECEIVE_ERROR)) |
485 | DP_AUX_CH_CTL_RECEIVE_ERROR)) |
486 | continue; |
486 | continue; |
487 | if (status & DP_AUX_CH_CTL_DONE) |
487 | if (status & DP_AUX_CH_CTL_DONE) |
488 | break; |
488 | break; |
489 | } |
489 | } |
490 | 490 | ||
491 | if ((status & DP_AUX_CH_CTL_DONE) == 0) { |
491 | if ((status & DP_AUX_CH_CTL_DONE) == 0) { |
492 | DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); |
492 | DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); |
493 | ret = -EBUSY; |
493 | ret = -EBUSY; |
494 | goto out; |
494 | goto out; |
495 | } |
495 | } |
496 | 496 | ||
497 | /* Check for timeout or receive error. |
497 | /* Check for timeout or receive error. |
498 | * Timeouts occur when the sink is not connected |
498 | * Timeouts occur when the sink is not connected |
499 | */ |
499 | */ |
500 | if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { |
500 | if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { |
501 | DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); |
501 | DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); |
502 | ret = -EIO; |
502 | ret = -EIO; |
503 | goto out; |
503 | goto out; |
504 | } |
504 | } |
505 | 505 | ||
506 | /* Timeouts occur when the device isn't connected, so they're |
506 | /* Timeouts occur when the device isn't connected, so they're |
507 | * "normal" -- don't fill the kernel log with these */ |
507 | * "normal" -- don't fill the kernel log with these */ |
508 | if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { |
508 | if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { |
509 | DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); |
509 | DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); |
510 | ret = -ETIMEDOUT; |
510 | ret = -ETIMEDOUT; |
511 | goto out; |
511 | goto out; |
512 | } |
512 | } |
513 | 513 | ||
514 | /* Unload any bytes sent back from the other side */ |
514 | /* Unload any bytes sent back from the other side */ |
515 | recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> |
515 | recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> |
516 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); |
516 | DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); |
517 | if (recv_bytes > recv_size) |
517 | if (recv_bytes > recv_size) |
518 | recv_bytes = recv_size; |
518 | recv_bytes = recv_size; |
519 | 519 | ||
520 | for (i = 0; i < recv_bytes; i += 4) |
520 | for (i = 0; i < recv_bytes; i += 4) |
521 | unpack_aux(I915_READ(ch_data + i), |
521 | unpack_aux(I915_READ(ch_data + i), |
522 | recv + i, recv_bytes - i); |
522 | recv + i, recv_bytes - i); |
523 | 523 | ||
524 | ret = recv_bytes; |
524 | ret = recv_bytes; |
525 | out: |
525 | out: |
526 | // pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); |
526 | // pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); |
527 | 527 | ||
528 | return ret; |
528 | return ret; |
529 | } |
529 | } |
530 | 530 | ||
531 | /* Write data to the aux channel in native mode */ |
531 | /* Write data to the aux channel in native mode */ |
532 | static int |
532 | static int |
533 | intel_dp_aux_native_write(struct intel_dp *intel_dp, |
533 | intel_dp_aux_native_write(struct intel_dp *intel_dp, |
534 | uint16_t address, uint8_t *send, int send_bytes) |
534 | uint16_t address, uint8_t *send, int send_bytes) |
535 | { |
535 | { |
536 | int ret; |
536 | int ret; |
537 | uint8_t msg[20]; |
537 | uint8_t msg[20]; |
538 | int msg_bytes; |
538 | int msg_bytes; |
539 | uint8_t ack; |
539 | uint8_t ack; |
540 | 540 | ||
541 | intel_dp_check_edp(intel_dp); |
541 | intel_dp_check_edp(intel_dp); |
542 | if (send_bytes > 16) |
542 | if (send_bytes > 16) |
543 | return -1; |
543 | return -1; |
544 | msg[0] = AUX_NATIVE_WRITE << 4; |
544 | msg[0] = AUX_NATIVE_WRITE << 4; |
545 | msg[1] = address >> 8; |
545 | msg[1] = address >> 8; |
546 | msg[2] = address & 0xff; |
546 | msg[2] = address & 0xff; |
547 | msg[3] = send_bytes - 1; |
547 | msg[3] = send_bytes - 1; |
548 | memcpy(&msg[4], send, send_bytes); |
548 | memcpy(&msg[4], send, send_bytes); |
549 | msg_bytes = send_bytes + 4; |
549 | msg_bytes = send_bytes + 4; |
550 | for (;;) { |
550 | for (;;) { |
551 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); |
551 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); |
552 | if (ret < 0) |
552 | if (ret < 0) |
553 | return ret; |
553 | return ret; |
554 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
554 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
555 | break; |
555 | break; |
556 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
556 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
557 | udelay(100); |
557 | udelay(100); |
558 | else |
558 | else |
559 | return -EIO; |
559 | return -EIO; |
560 | } |
560 | } |
561 | return send_bytes; |
561 | return send_bytes; |
562 | } |
562 | } |
563 | 563 | ||
564 | /* Write a single byte to the aux channel in native mode */ |
564 | /* Write a single byte to the aux channel in native mode */ |
565 | static int |
565 | static int |
566 | intel_dp_aux_native_write_1(struct intel_dp *intel_dp, |
566 | intel_dp_aux_native_write_1(struct intel_dp *intel_dp, |
567 | uint16_t address, uint8_t byte) |
567 | uint16_t address, uint8_t byte) |
568 | { |
568 | { |
569 | return intel_dp_aux_native_write(intel_dp, address, &byte, 1); |
569 | return intel_dp_aux_native_write(intel_dp, address, &byte, 1); |
570 | } |
570 | } |
571 | 571 | ||
572 | /* read bytes from a native aux channel */ |
572 | /* read bytes from a native aux channel */ |
573 | static int |
573 | static int |
574 | intel_dp_aux_native_read(struct intel_dp *intel_dp, |
574 | intel_dp_aux_native_read(struct intel_dp *intel_dp, |
575 | uint16_t address, uint8_t *recv, int recv_bytes) |
575 | uint16_t address, uint8_t *recv, int recv_bytes) |
576 | { |
576 | { |
577 | uint8_t msg[4]; |
577 | uint8_t msg[4]; |
578 | int msg_bytes; |
578 | int msg_bytes; |
579 | uint8_t reply[20]; |
579 | uint8_t reply[20]; |
580 | int reply_bytes; |
580 | int reply_bytes; |
581 | uint8_t ack; |
581 | uint8_t ack; |
582 | int ret; |
582 | int ret; |
583 | 583 | ||
584 | intel_dp_check_edp(intel_dp); |
584 | intel_dp_check_edp(intel_dp); |
585 | msg[0] = AUX_NATIVE_READ << 4; |
585 | msg[0] = AUX_NATIVE_READ << 4; |
586 | msg[1] = address >> 8; |
586 | msg[1] = address >> 8; |
587 | msg[2] = address & 0xff; |
587 | msg[2] = address & 0xff; |
588 | msg[3] = recv_bytes - 1; |
588 | msg[3] = recv_bytes - 1; |
589 | 589 | ||
590 | msg_bytes = 4; |
590 | msg_bytes = 4; |
591 | reply_bytes = recv_bytes + 1; |
591 | reply_bytes = recv_bytes + 1; |
592 | 592 | ||
593 | for (;;) { |
593 | for (;;) { |
594 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, |
594 | ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, |
595 | reply, reply_bytes); |
595 | reply, reply_bytes); |
596 | if (ret == 0) |
596 | if (ret == 0) |
597 | return -EPROTO; |
597 | return -EPROTO; |
598 | if (ret < 0) |
598 | if (ret < 0) |
599 | return ret; |
599 | return ret; |
600 | ack = reply[0]; |
600 | ack = reply[0]; |
601 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { |
601 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) { |
602 | memcpy(recv, reply + 1, ret - 1); |
602 | memcpy(recv, reply + 1, ret - 1); |
603 | return ret - 1; |
603 | return ret - 1; |
604 | } |
604 | } |
605 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
605 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
606 | udelay(100); |
606 | udelay(100); |
607 | else |
607 | else |
608 | return -EIO; |
608 | return -EIO; |
609 | } |
609 | } |
610 | } |
610 | } |
611 | 611 | ||
612 | static int |
612 | static int |
613 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, |
613 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, |
614 | uint8_t write_byte, uint8_t *read_byte) |
614 | uint8_t write_byte, uint8_t *read_byte) |
615 | { |
615 | { |
616 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; |
616 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; |
617 | struct intel_dp *intel_dp = container_of(adapter, |
617 | struct intel_dp *intel_dp = container_of(adapter, |
618 | struct intel_dp, |
618 | struct intel_dp, |
619 | adapter); |
619 | adapter); |
620 | uint16_t address = algo_data->address; |
620 | uint16_t address = algo_data->address; |
621 | uint8_t msg[5]; |
621 | uint8_t msg[5]; |
622 | uint8_t reply[2]; |
622 | uint8_t reply[2]; |
623 | unsigned retry; |
623 | unsigned retry; |
624 | int msg_bytes; |
624 | int msg_bytes; |
625 | int reply_bytes; |
625 | int reply_bytes; |
626 | int ret; |
626 | int ret; |
627 | 627 | ||
628 | intel_dp_check_edp(intel_dp); |
628 | intel_dp_check_edp(intel_dp); |
629 | /* Set up the command byte */ |
629 | /* Set up the command byte */ |
630 | if (mode & MODE_I2C_READ) |
630 | if (mode & MODE_I2C_READ) |
631 | msg[0] = AUX_I2C_READ << 4; |
631 | msg[0] = AUX_I2C_READ << 4; |
632 | else |
632 | else |
633 | msg[0] = AUX_I2C_WRITE << 4; |
633 | msg[0] = AUX_I2C_WRITE << 4; |
634 | 634 | ||
635 | if (!(mode & MODE_I2C_STOP)) |
635 | if (!(mode & MODE_I2C_STOP)) |
636 | msg[0] |= AUX_I2C_MOT << 4; |
636 | msg[0] |= AUX_I2C_MOT << 4; |
637 | 637 | ||
638 | msg[1] = address >> 8; |
638 | msg[1] = address >> 8; |
639 | msg[2] = address; |
639 | msg[2] = address; |
640 | 640 | ||
641 | switch (mode) { |
641 | switch (mode) { |
642 | case MODE_I2C_WRITE: |
642 | case MODE_I2C_WRITE: |
643 | msg[3] = 0; |
643 | msg[3] = 0; |
644 | msg[4] = write_byte; |
644 | msg[4] = write_byte; |
645 | msg_bytes = 5; |
645 | msg_bytes = 5; |
646 | reply_bytes = 1; |
646 | reply_bytes = 1; |
647 | break; |
647 | break; |
648 | case MODE_I2C_READ: |
648 | case MODE_I2C_READ: |
649 | msg[3] = 0; |
649 | msg[3] = 0; |
650 | msg_bytes = 4; |
650 | msg_bytes = 4; |
651 | reply_bytes = 2; |
651 | reply_bytes = 2; |
652 | break; |
652 | break; |
653 | default: |
653 | default: |
654 | msg_bytes = 3; |
654 | msg_bytes = 3; |
655 | reply_bytes = 1; |
655 | reply_bytes = 1; |
656 | break; |
656 | break; |
657 | } |
657 | } |
658 | 658 | ||
659 | for (retry = 0; retry < 5; retry++) { |
659 | for (retry = 0; retry < 5; retry++) { |
660 | ret = intel_dp_aux_ch(intel_dp, |
660 | ret = intel_dp_aux_ch(intel_dp, |
661 | msg, msg_bytes, |
661 | msg, msg_bytes, |
662 | reply, reply_bytes); |
662 | reply, reply_bytes); |
663 | if (ret < 0) { |
663 | if (ret < 0) { |
664 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); |
664 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); |
665 | return ret; |
665 | return ret; |
666 | } |
666 | } |
667 | 667 | ||
668 | switch (reply[0] & AUX_NATIVE_REPLY_MASK) { |
668 | switch (reply[0] & AUX_NATIVE_REPLY_MASK) { |
669 | case AUX_NATIVE_REPLY_ACK: |
669 | case AUX_NATIVE_REPLY_ACK: |
670 | /* I2C-over-AUX Reply field is only valid |
670 | /* I2C-over-AUX Reply field is only valid |
671 | * when paired with AUX ACK. |
671 | * when paired with AUX ACK. |
672 | */ |
672 | */ |
673 | break; |
673 | break; |
674 | case AUX_NATIVE_REPLY_NACK: |
674 | case AUX_NATIVE_REPLY_NACK: |
675 | DRM_DEBUG_KMS("aux_ch native nack\n"); |
675 | DRM_DEBUG_KMS("aux_ch native nack\n"); |
676 | return -EREMOTEIO; |
676 | return -EREMOTEIO; |
677 | case AUX_NATIVE_REPLY_DEFER: |
677 | case AUX_NATIVE_REPLY_DEFER: |
678 | udelay(100); |
678 | udelay(100); |
679 | continue; |
679 | continue; |
680 | default: |
680 | default: |
681 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", |
681 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", |
682 | reply[0]); |
682 | reply[0]); |
683 | return -EREMOTEIO; |
683 | return -EREMOTEIO; |
684 | } |
684 | } |
685 | 685 | ||
686 | switch (reply[0] & AUX_I2C_REPLY_MASK) { |
686 | switch (reply[0] & AUX_I2C_REPLY_MASK) { |
687 | case AUX_I2C_REPLY_ACK: |
687 | case AUX_I2C_REPLY_ACK: |
688 | if (mode == MODE_I2C_READ) { |
688 | if (mode == MODE_I2C_READ) { |
689 | *read_byte = reply[1]; |
689 | *read_byte = reply[1]; |
690 | } |
690 | } |
691 | return reply_bytes - 1; |
691 | return reply_bytes - 1; |
692 | case AUX_I2C_REPLY_NACK: |
692 | case AUX_I2C_REPLY_NACK: |
693 | DRM_DEBUG_KMS("aux_i2c nack\n"); |
693 | DRM_DEBUG_KMS("aux_i2c nack\n"); |
694 | return -EREMOTEIO; |
694 | return -EREMOTEIO; |
695 | case AUX_I2C_REPLY_DEFER: |
695 | case AUX_I2C_REPLY_DEFER: |
696 | DRM_DEBUG_KMS("aux_i2c defer\n"); |
696 | DRM_DEBUG_KMS("aux_i2c defer\n"); |
697 | udelay(100); |
697 | udelay(100); |
698 | break; |
698 | break; |
699 | default: |
699 | default: |
700 | DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); |
700 | DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); |
701 | return -EREMOTEIO; |
701 | return -EREMOTEIO; |
702 | } |
702 | } |
703 | } |
703 | } |
704 | 704 | ||
705 | DRM_ERROR("too many retries, giving up\n"); |
705 | DRM_ERROR("too many retries, giving up\n"); |
706 | return -EREMOTEIO; |
706 | return -EREMOTEIO; |
707 | } |
707 | } |
708 | 708 | ||
709 | static int |
709 | static int |
710 | intel_dp_i2c_init(struct intel_dp *intel_dp, |
710 | intel_dp_i2c_init(struct intel_dp *intel_dp, |
711 | struct intel_connector *intel_connector, const char *name) |
711 | struct intel_connector *intel_connector, const char *name) |
712 | { |
712 | { |
713 | int ret; |
713 | int ret; |
714 | 714 | ||
715 | DRM_DEBUG_KMS("i2c_init %s\n", name); |
715 | DRM_DEBUG_KMS("i2c_init %s\n", name); |
716 | intel_dp->algo.running = false; |
716 | intel_dp->algo.running = false; |
717 | intel_dp->algo.address = 0; |
717 | intel_dp->algo.address = 0; |
718 | intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; |
718 | intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch; |
719 | 719 | ||
720 | memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); |
720 | memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter)); |
721 | intel_dp->adapter.owner = THIS_MODULE; |
721 | intel_dp->adapter.owner = THIS_MODULE; |
722 | intel_dp->adapter.class = I2C_CLASS_DDC; |
722 | intel_dp->adapter.class = I2C_CLASS_DDC; |
723 | strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); |
723 | strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1); |
724 | intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; |
724 | intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0'; |
725 | intel_dp->adapter.algo_data = &intel_dp->algo; |
725 | intel_dp->adapter.algo_data = &intel_dp->algo; |
726 | intel_dp->adapter.dev.parent = &intel_connector->base.kdev; |
726 | intel_dp->adapter.dev.parent = &intel_connector->base.kdev; |
727 | 727 | ||
728 | ironlake_edp_panel_vdd_on(intel_dp); |
728 | ironlake_edp_panel_vdd_on(intel_dp); |
729 | ret = i2c_dp_aux_add_bus(&intel_dp->adapter); |
729 | ret = i2c_dp_aux_add_bus(&intel_dp->adapter); |
730 | ironlake_edp_panel_vdd_off(intel_dp, false); |
730 | ironlake_edp_panel_vdd_off(intel_dp, false); |
731 | return ret; |
731 | return ret; |
732 | } |
732 | } |
733 | 733 | ||
734 | bool |
734 | bool |
735 | intel_dp_mode_fixup(struct drm_encoder *encoder, |
735 | intel_dp_mode_fixup(struct drm_encoder *encoder, |
736 | const struct drm_display_mode *mode, |
736 | const struct drm_display_mode *mode, |
737 | struct drm_display_mode *adjusted_mode) |
737 | struct drm_display_mode *adjusted_mode) |
738 | { |
738 | { |
739 | struct drm_device *dev = encoder->dev; |
739 | struct drm_device *dev = encoder->dev; |
740 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
740 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
741 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
741 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
742 | int lane_count, clock; |
742 | int lane_count, clock; |
743 | int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); |
743 | int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); |
744 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; |
744 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; |
745 | int bpp, mode_rate; |
745 | int bpp, mode_rate; |
746 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
746 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
747 | 747 | ||
748 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { |
748 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { |
749 | intel_fixed_panel_mode(intel_connector->panel.fixed_mode, |
749 | intel_fixed_panel_mode(intel_connector->panel.fixed_mode, |
750 | adjusted_mode); |
750 | adjusted_mode); |
751 | intel_pch_panel_fitting(dev, |
751 | intel_pch_panel_fitting(dev, |
752 | intel_connector->panel.fitting_mode, |
752 | intel_connector->panel.fitting_mode, |
753 | mode, adjusted_mode); |
753 | mode, adjusted_mode); |
754 | } |
754 | } |
755 | 755 | ||
756 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) |
756 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) |
757 | return false; |
757 | return false; |
758 | 758 | ||
759 | DRM_DEBUG_KMS("DP link computation with max lane count %i " |
759 | DRM_DEBUG_KMS("DP link computation with max lane count %i " |
760 | "max bw %02x pixel clock %iKHz\n", |
760 | "max bw %02x pixel clock %iKHz\n", |
761 | max_lane_count, bws[max_clock], adjusted_mode->clock); |
761 | max_lane_count, bws[max_clock], adjusted_mode->clock); |
762 | 762 | ||
763 | if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true)) |
763 | if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true)) |
764 | return false; |
764 | return false; |
765 | 765 | ||
766 | bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; |
766 | bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; |
767 | 767 | ||
768 | if (intel_dp->color_range_auto) { |
768 | if (intel_dp->color_range_auto) { |
769 | /* |
769 | /* |
770 | * See: |
770 | * See: |
771 | * CEA-861-E - 5.1 Default Encoding Parameters |
771 | * CEA-861-E - 5.1 Default Encoding Parameters |
772 | * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry |
772 | * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry |
773 | */ |
773 | */ |
774 | if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1) |
774 | if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1) |
775 | intel_dp->color_range = DP_COLOR_RANGE_16_235; |
775 | intel_dp->color_range = DP_COLOR_RANGE_16_235; |
776 | else |
776 | else |
777 | intel_dp->color_range = 0; |
777 | intel_dp->color_range = 0; |
778 | } |
778 | } |
779 | 779 | ||
780 | if (intel_dp->color_range) |
780 | if (intel_dp->color_range) |
781 | adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; |
781 | adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; |
782 | 782 | ||
783 | mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); |
783 | mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); |
784 | 784 | ||
785 | for (clock = 0; clock <= max_clock; clock++) { |
785 | for (clock = 0; clock <= max_clock; clock++) { |
786 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
786 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
787 | int link_bw_clock = |
787 | int link_bw_clock = |
788 | drm_dp_bw_code_to_link_rate(bws[clock]); |
788 | drm_dp_bw_code_to_link_rate(bws[clock]); |
789 | int link_avail = intel_dp_max_data_rate(link_bw_clock, |
789 | int link_avail = intel_dp_max_data_rate(link_bw_clock, |
790 | lane_count); |
790 | lane_count); |
791 | 791 | ||
792 | if (mode_rate <= link_avail) { |
792 | if (mode_rate <= link_avail) { |
793 | intel_dp->link_bw = bws[clock]; |
793 | intel_dp->link_bw = bws[clock]; |
794 | intel_dp->lane_count = lane_count; |
794 | intel_dp->lane_count = lane_count; |
795 | adjusted_mode->clock = link_bw_clock; |
795 | adjusted_mode->clock = link_bw_clock; |
796 | DRM_DEBUG_KMS("DP link bw %02x lane " |
796 | DRM_DEBUG_KMS("DP link bw %02x lane " |
797 | "count %d clock %d bpp %d\n", |
797 | "count %d clock %d bpp %d\n", |
798 | intel_dp->link_bw, intel_dp->lane_count, |
798 | intel_dp->link_bw, intel_dp->lane_count, |
799 | adjusted_mode->clock, bpp); |
799 | adjusted_mode->clock, bpp); |
800 | DRM_DEBUG_KMS("DP link bw required %i available %i\n", |
800 | DRM_DEBUG_KMS("DP link bw required %i available %i\n", |
801 | mode_rate, link_avail); |
801 | mode_rate, link_avail); |
802 | return true; |
802 | return true; |
803 | } |
803 | } |
804 | } |
804 | } |
805 | } |
805 | } |
806 | 806 | ||
807 | return false; |
807 | return false; |
808 | } |
808 | } |
809 | 809 | ||
810 | void |
810 | void |
811 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
811 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
812 | struct drm_display_mode *adjusted_mode) |
812 | struct drm_display_mode *adjusted_mode) |
813 | { |
813 | { |
814 | struct drm_device *dev = crtc->dev; |
814 | struct drm_device *dev = crtc->dev; |
815 | struct intel_encoder *intel_encoder; |
815 | struct intel_encoder *intel_encoder; |
816 | struct intel_dp *intel_dp; |
816 | struct intel_dp *intel_dp; |
817 | struct drm_i915_private *dev_priv = dev->dev_private; |
817 | struct drm_i915_private *dev_priv = dev->dev_private; |
818 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
818 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
819 | int lane_count = 4; |
819 | int lane_count = 4; |
820 | struct intel_link_m_n m_n; |
820 | struct intel_link_m_n m_n; |
821 | int pipe = intel_crtc->pipe; |
821 | int pipe = intel_crtc->pipe; |
822 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
822 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
823 | int target_clock; |
823 | int target_clock; |
824 | 824 | ||
825 | /* |
825 | /* |
826 | * Find the lane count in the intel_encoder private |
826 | * Find the lane count in the intel_encoder private |
827 | */ |
827 | */ |
828 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
828 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
829 | intel_dp = enc_to_intel_dp(&intel_encoder->base); |
829 | intel_dp = enc_to_intel_dp(&intel_encoder->base); |
830 | 830 | ||
831 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
831 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
832 | intel_encoder->type == INTEL_OUTPUT_EDP) |
832 | intel_encoder->type == INTEL_OUTPUT_EDP) |
833 | { |
833 | { |
834 | lane_count = intel_dp->lane_count; |
834 | lane_count = intel_dp->lane_count; |
835 | break; |
835 | break; |
836 | } |
836 | } |
837 | } |
837 | } |
838 | 838 | ||
839 | target_clock = mode->clock; |
839 | target_clock = mode->clock; |
840 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
840 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
841 | if (intel_encoder->type == INTEL_OUTPUT_EDP) { |
841 | if (intel_encoder->type == INTEL_OUTPUT_EDP) { |
842 | target_clock = intel_edp_target_clock(intel_encoder, |
842 | target_clock = intel_edp_target_clock(intel_encoder, |
843 | mode); |
843 | mode); |
844 | break; |
844 | break; |
845 | } |
845 | } |
846 | } |
846 | } |
847 | 847 | ||
848 | /* |
848 | /* |
849 | * Compute the GMCH and Link ratios. The '3' here is |
849 | * Compute the GMCH and Link ratios. The '3' here is |
850 | * the number of bytes_per_pixel post-LUT, which we always |
850 | * the number of bytes_per_pixel post-LUT, which we always |
851 | * set up for 8-bits of R/G/B, or 3 bytes total. |
851 | * set up for 8-bits of R/G/B, or 3 bytes total. |
852 | */ |
852 | */ |
853 | intel_link_compute_m_n(intel_crtc->bpp, lane_count, |
853 | intel_link_compute_m_n(intel_crtc->bpp, lane_count, |
854 | target_clock, adjusted_mode->clock, &m_n); |
854 | target_clock, adjusted_mode->clock, &m_n); |
855 | 855 | ||
856 | if (IS_HASWELL(dev)) { |
856 | if (IS_HASWELL(dev)) { |
857 | I915_WRITE(PIPE_DATA_M1(cpu_transcoder), |
857 | I915_WRITE(PIPE_DATA_M1(cpu_transcoder), |
858 | TU_SIZE(m_n.tu) | m_n.gmch_m); |
858 | TU_SIZE(m_n.tu) | m_n.gmch_m); |
859 | I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); |
859 | I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); |
860 | I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); |
860 | I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m); |
861 | I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); |
861 | I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n); |
862 | } else if (HAS_PCH_SPLIT(dev)) { |
862 | } else if (HAS_PCH_SPLIT(dev)) { |
863 | I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
863 | I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
864 | I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); |
864 | I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); |
865 | I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); |
865 | I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); |
866 | I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); |
866 | I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); |
867 | } else if (IS_VALLEYVIEW(dev)) { |
867 | } else if (IS_VALLEYVIEW(dev)) { |
868 | I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
868 | I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
869 | I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); |
869 | I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); |
870 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); |
870 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); |
871 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); |
871 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); |
872 | } else { |
872 | } else { |
873 | I915_WRITE(PIPE_GMCH_DATA_M(pipe), |
873 | I915_WRITE(PIPE_GMCH_DATA_M(pipe), |
874 | TU_SIZE(m_n.tu) | m_n.gmch_m); |
874 | TU_SIZE(m_n.tu) | m_n.gmch_m); |
875 | I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); |
875 | I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); |
876 | I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); |
876 | I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); |
877 | I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); |
877 | I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); |
878 | } |
878 | } |
879 | } |
879 | } |
880 | 880 | ||
881 | void intel_dp_init_link_config(struct intel_dp *intel_dp) |
881 | void intel_dp_init_link_config(struct intel_dp *intel_dp) |
882 | { |
882 | { |
883 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); |
883 | memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); |
884 | intel_dp->link_configuration[0] = intel_dp->link_bw; |
884 | intel_dp->link_configuration[0] = intel_dp->link_bw; |
885 | intel_dp->link_configuration[1] = intel_dp->lane_count; |
885 | intel_dp->link_configuration[1] = intel_dp->lane_count; |
886 | intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; |
886 | intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; |
887 | /* |
887 | /* |
888 | * Check for DPCD version > 1.1 and enhanced framing support |
888 | * Check for DPCD version > 1.1 and enhanced framing support |
889 | */ |
889 | */ |
890 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
890 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && |
891 | (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { |
891 | (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { |
892 | intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
892 | intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; |
893 | } |
893 | } |
894 | } |
894 | } |
895 | 895 | ||
896 | static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) |
896 | static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) |
897 | { |
897 | { |
898 | struct drm_device *dev = crtc->dev; |
898 | struct drm_device *dev = crtc->dev; |
899 | struct drm_i915_private *dev_priv = dev->dev_private; |
899 | struct drm_i915_private *dev_priv = dev->dev_private; |
900 | u32 dpa_ctl; |
900 | u32 dpa_ctl; |
901 | 901 | ||
902 | DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); |
902 | DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); |
903 | dpa_ctl = I915_READ(DP_A); |
903 | dpa_ctl = I915_READ(DP_A); |
904 | dpa_ctl &= ~DP_PLL_FREQ_MASK; |
904 | dpa_ctl &= ~DP_PLL_FREQ_MASK; |
905 | 905 | ||
906 | if (clock < 200000) { |
906 | if (clock < 200000) { |
907 | /* For a long time we've carried around a ILK-DevA w/a for the |
907 | /* For a long time we've carried around a ILK-DevA w/a for the |
908 | * 160MHz clock. If we're really unlucky, it's still required. |
908 | * 160MHz clock. If we're really unlucky, it's still required. |
909 | */ |
909 | */ |
910 | DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n"); |
910 | DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n"); |
911 | dpa_ctl |= DP_PLL_FREQ_160MHZ; |
911 | dpa_ctl |= DP_PLL_FREQ_160MHZ; |
912 | } else { |
912 | } else { |
913 | dpa_ctl |= DP_PLL_FREQ_270MHZ; |
913 | dpa_ctl |= DP_PLL_FREQ_270MHZ; |
914 | } |
914 | } |
915 | 915 | ||
916 | I915_WRITE(DP_A, dpa_ctl); |
916 | I915_WRITE(DP_A, dpa_ctl); |
917 | 917 | ||
918 | POSTING_READ(DP_A); |
918 | POSTING_READ(DP_A); |
919 | udelay(500); |
919 | udelay(500); |
920 | } |
920 | } |
921 | 921 | ||
922 | static void |
922 | static void |
923 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, |
923 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, |
924 | struct drm_display_mode *adjusted_mode) |
924 | struct drm_display_mode *adjusted_mode) |
925 | { |
925 | { |
926 | struct drm_device *dev = encoder->dev; |
926 | struct drm_device *dev = encoder->dev; |
927 | struct drm_i915_private *dev_priv = dev->dev_private; |
927 | struct drm_i915_private *dev_priv = dev->dev_private; |
928 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
928 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
929 | struct drm_crtc *crtc = encoder->crtc; |
929 | struct drm_crtc *crtc = encoder->crtc; |
930 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
930 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
931 | 931 | ||
932 | /* |
932 | /* |
933 | * There are four kinds of DP registers: |
933 | * There are four kinds of DP registers: |
934 | * |
934 | * |
935 | * IBX PCH |
935 | * IBX PCH |
936 | * SNB CPU |
936 | * SNB CPU |
937 | * IVB CPU |
937 | * IVB CPU |
938 | * CPT PCH |
938 | * CPT PCH |
939 | * |
939 | * |
940 | * IBX PCH and CPU are the same for almost everything, |
940 | * IBX PCH and CPU are the same for almost everything, |
941 | * except that the CPU DP PLL is configured in this |
941 | * except that the CPU DP PLL is configured in this |
942 | * register |
942 | * register |
943 | * |
943 | * |
944 | * CPT PCH is quite different, having many bits moved |
944 | * CPT PCH is quite different, having many bits moved |
945 | * to the TRANS_DP_CTL register instead. That |
945 | * to the TRANS_DP_CTL register instead. That |
946 | * configuration happens (oddly) in ironlake_pch_enable |
946 | * configuration happens (oddly) in ironlake_pch_enable |
947 | */ |
947 | */ |
948 | 948 | ||
949 | /* Preserve the BIOS-computed detected bit. This is |
949 | /* Preserve the BIOS-computed detected bit. This is |
950 | * supposed to be read-only. |
950 | * supposed to be read-only. |
951 | */ |
951 | */ |
952 | intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; |
952 | intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; |
953 | 953 | ||
954 | /* Handle DP bits in common between all three register formats */ |
954 | /* Handle DP bits in common between all three register formats */ |
955 | intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; |
955 | intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; |
956 | 956 | ||
957 | switch (intel_dp->lane_count) { |
957 | switch (intel_dp->lane_count) { |
958 | case 1: |
958 | case 1: |
959 | intel_dp->DP |= DP_PORT_WIDTH_1; |
959 | intel_dp->DP |= DP_PORT_WIDTH_1; |
960 | break; |
960 | break; |
961 | case 2: |
961 | case 2: |
962 | intel_dp->DP |= DP_PORT_WIDTH_2; |
962 | intel_dp->DP |= DP_PORT_WIDTH_2; |
963 | break; |
963 | break; |
964 | case 4: |
964 | case 4: |
965 | intel_dp->DP |= DP_PORT_WIDTH_4; |
965 | intel_dp->DP |= DP_PORT_WIDTH_4; |
966 | break; |
966 | break; |
967 | } |
967 | } |
968 | if (intel_dp->has_audio) { |
968 | if (intel_dp->has_audio) { |
969 | DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", |
969 | DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", |
970 | pipe_name(intel_crtc->pipe)); |
970 | pipe_name(intel_crtc->pipe)); |
971 | intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; |
971 | intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; |
972 | intel_write_eld(encoder, adjusted_mode); |
972 | intel_write_eld(encoder, adjusted_mode); |
973 | } |
973 | } |
974 | 974 | ||
975 | intel_dp_init_link_config(intel_dp); |
975 | intel_dp_init_link_config(intel_dp); |
976 | 976 | ||
977 | /* Split out the IBX/CPU vs CPT settings */ |
977 | /* Split out the IBX/CPU vs CPT settings */ |
978 | 978 | ||
979 | if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { |
979 | if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { |
980 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
980 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
981 | intel_dp->DP |= DP_SYNC_HS_HIGH; |
981 | intel_dp->DP |= DP_SYNC_HS_HIGH; |
982 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
982 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
983 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
983 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
984 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
984 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
985 | 985 | ||
986 | if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) |
986 | if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) |
987 | intel_dp->DP |= DP_ENHANCED_FRAMING; |
987 | intel_dp->DP |= DP_ENHANCED_FRAMING; |
988 | 988 | ||
989 | intel_dp->DP |= intel_crtc->pipe << 29; |
989 | intel_dp->DP |= intel_crtc->pipe << 29; |
990 | 990 | ||
991 | /* don't miss out required setting for eDP */ |
991 | /* don't miss out required setting for eDP */ |
992 | if (adjusted_mode->clock < 200000) |
992 | if (adjusted_mode->clock < 200000) |
993 | intel_dp->DP |= DP_PLL_FREQ_160MHZ; |
993 | intel_dp->DP |= DP_PLL_FREQ_160MHZ; |
994 | else |
994 | else |
995 | intel_dp->DP |= DP_PLL_FREQ_270MHZ; |
995 | intel_dp->DP |= DP_PLL_FREQ_270MHZ; |
996 | } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { |
996 | } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { |
997 | if (!HAS_PCH_SPLIT(dev)) |
997 | if (!HAS_PCH_SPLIT(dev)) |
998 | intel_dp->DP |= intel_dp->color_range; |
998 | intel_dp->DP |= intel_dp->color_range; |
999 | 999 | ||
1000 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
1000 | if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
1001 | intel_dp->DP |= DP_SYNC_HS_HIGH; |
1001 | intel_dp->DP |= DP_SYNC_HS_HIGH; |
1002 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
1002 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
1003 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
1003 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
1004 | intel_dp->DP |= DP_LINK_TRAIN_OFF; |
1004 | intel_dp->DP |= DP_LINK_TRAIN_OFF; |
1005 | 1005 | ||
1006 | if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) |
1006 | if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) |
1007 | intel_dp->DP |= DP_ENHANCED_FRAMING; |
1007 | intel_dp->DP |= DP_ENHANCED_FRAMING; |
1008 | 1008 | ||
1009 | if (intel_crtc->pipe == 1) |
1009 | if (intel_crtc->pipe == 1) |
1010 | intel_dp->DP |= DP_PIPEB_SELECT; |
1010 | intel_dp->DP |= DP_PIPEB_SELECT; |
1011 | 1011 | ||
1012 | if (is_cpu_edp(intel_dp)) { |
1012 | if (is_cpu_edp(intel_dp)) { |
1013 | /* don't miss out required setting for eDP */ |
1013 | /* don't miss out required setting for eDP */ |
1014 | if (adjusted_mode->clock < 200000) |
1014 | if (adjusted_mode->clock < 200000) |
1015 | intel_dp->DP |= DP_PLL_FREQ_160MHZ; |
1015 | intel_dp->DP |= DP_PLL_FREQ_160MHZ; |
1016 | else |
1016 | else |
1017 | intel_dp->DP |= DP_PLL_FREQ_270MHZ; |
1017 | intel_dp->DP |= DP_PLL_FREQ_270MHZ; |
1018 | } |
1018 | } |
1019 | } else { |
1019 | } else { |
1020 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
1020 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
1021 | } |
1021 | } |
1022 | 1022 | ||
1023 | if (is_cpu_edp(intel_dp)) |
1023 | if (is_cpu_edp(intel_dp)) |
1024 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
1024 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
1025 | } |
1025 | } |
1026 | 1026 | ||
1027 | #define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) |
1027 | #define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) |
1028 | #define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) |
1028 | #define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) |
1029 | 1029 | ||
1030 | #define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) |
1030 | #define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) |
1031 | #define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) |
1031 | #define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) |
1032 | 1032 | ||
1033 | #define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) |
1033 | #define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) |
1034 | #define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) |
1034 | #define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) |
1035 | 1035 | ||
1036 | static void ironlake_wait_panel_status(struct intel_dp *intel_dp, |
1036 | static void ironlake_wait_panel_status(struct intel_dp *intel_dp, |
1037 | u32 mask, |
1037 | u32 mask, |
1038 | u32 value) |
1038 | u32 value) |
1039 | { |
1039 | { |
1040 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1040 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1041 | struct drm_i915_private *dev_priv = dev->dev_private; |
1041 | struct drm_i915_private *dev_priv = dev->dev_private; |
1042 | 1042 | ||
1043 | DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", |
1043 | DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", |
1044 | mask, value, |
1044 | mask, value, |
1045 | I915_READ(PCH_PP_STATUS), |
1045 | I915_READ(PCH_PP_STATUS), |
1046 | I915_READ(PCH_PP_CONTROL)); |
1046 | I915_READ(PCH_PP_CONTROL)); |
1047 | 1047 | ||
1048 | if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) { |
1048 | if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) { |
1049 | DRM_ERROR("Panel status timeout: status %08x control %08x\n", |
1049 | DRM_ERROR("Panel status timeout: status %08x control %08x\n", |
1050 | I915_READ(PCH_PP_STATUS), |
1050 | I915_READ(PCH_PP_STATUS), |
1051 | I915_READ(PCH_PP_CONTROL)); |
1051 | I915_READ(PCH_PP_CONTROL)); |
1052 | } |
1052 | } |
1053 | } |
1053 | } |
1054 | 1054 | ||
1055 | static void ironlake_wait_panel_on(struct intel_dp *intel_dp) |
1055 | static void ironlake_wait_panel_on(struct intel_dp *intel_dp) |
1056 | { |
1056 | { |
1057 | DRM_DEBUG_KMS("Wait for panel power on\n"); |
1057 | DRM_DEBUG_KMS("Wait for panel power on\n"); |
1058 | ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); |
1058 | ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); |
1059 | } |
1059 | } |
1060 | 1060 | ||
1061 | static void ironlake_wait_panel_off(struct intel_dp *intel_dp) |
1061 | static void ironlake_wait_panel_off(struct intel_dp *intel_dp) |
1062 | { |
1062 | { |
1063 | DRM_DEBUG_KMS("Wait for panel power off time\n"); |
1063 | DRM_DEBUG_KMS("Wait for panel power off time\n"); |
1064 | ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); |
1064 | ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); |
1065 | } |
1065 | } |
1066 | 1066 | ||
1067 | static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) |
1067 | static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) |
1068 | { |
1068 | { |
1069 | DRM_DEBUG_KMS("Wait for panel power cycle\n"); |
1069 | DRM_DEBUG_KMS("Wait for panel power cycle\n"); |
1070 | ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); |
1070 | ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); |
1071 | } |
1071 | } |
1072 | 1072 | ||
1073 | 1073 | ||
1074 | /* Read the current pp_control value, unlocking the register if it |
1074 | /* Read the current pp_control value, unlocking the register if it |
1075 | * is locked |
1075 | * is locked |
1076 | */ |
1076 | */ |
1077 | 1077 | ||
1078 | static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) |
1078 | static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) |
1079 | { |
1079 | { |
1080 | u32 control = I915_READ(PCH_PP_CONTROL); |
1080 | u32 control = I915_READ(PCH_PP_CONTROL); |
1081 | 1081 | ||
1082 | control &= ~PANEL_UNLOCK_MASK; |
1082 | control &= ~PANEL_UNLOCK_MASK; |
1083 | control |= PANEL_UNLOCK_REGS; |
1083 | control |= PANEL_UNLOCK_REGS; |
1084 | return control; |
1084 | return control; |
1085 | } |
1085 | } |
1086 | 1086 | ||
1087 | void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) |
1087 | void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) |
1088 | { |
1088 | { |
1089 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1089 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1090 | struct drm_i915_private *dev_priv = dev->dev_private; |
1090 | struct drm_i915_private *dev_priv = dev->dev_private; |
1091 | u32 pp; |
1091 | u32 pp; |
1092 | 1092 | ||
1093 | if (!is_edp(intel_dp)) |
1093 | if (!is_edp(intel_dp)) |
1094 | return; |
1094 | return; |
1095 | DRM_DEBUG_KMS("Turn eDP VDD on\n"); |
1095 | DRM_DEBUG_KMS("Turn eDP VDD on\n"); |
1096 | 1096 | ||
1097 | WARN(intel_dp->want_panel_vdd, |
1097 | WARN(intel_dp->want_panel_vdd, |
1098 | "eDP VDD already requested on\n"); |
1098 | "eDP VDD already requested on\n"); |
1099 | 1099 | ||
1100 | intel_dp->want_panel_vdd = true; |
1100 | intel_dp->want_panel_vdd = true; |
1101 | 1101 | ||
1102 | if (ironlake_edp_have_panel_vdd(intel_dp)) { |
1102 | if (ironlake_edp_have_panel_vdd(intel_dp)) { |
1103 | DRM_DEBUG_KMS("eDP VDD already on\n"); |
1103 | DRM_DEBUG_KMS("eDP VDD already on\n"); |
1104 | return; |
1104 | return; |
1105 | } |
1105 | } |
1106 | 1106 | ||
1107 | if (!ironlake_edp_have_panel_power(intel_dp)) |
1107 | if (!ironlake_edp_have_panel_power(intel_dp)) |
1108 | ironlake_wait_panel_power_cycle(intel_dp); |
1108 | ironlake_wait_panel_power_cycle(intel_dp); |
1109 | 1109 | ||
1110 | pp = ironlake_get_pp_control(dev_priv); |
1110 | pp = ironlake_get_pp_control(dev_priv); |
1111 | pp |= EDP_FORCE_VDD; |
1111 | pp |= EDP_FORCE_VDD; |
1112 | I915_WRITE(PCH_PP_CONTROL, pp); |
1112 | I915_WRITE(PCH_PP_CONTROL, pp); |
1113 | POSTING_READ(PCH_PP_CONTROL); |
1113 | POSTING_READ(PCH_PP_CONTROL); |
1114 | DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", |
1114 | DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", |
1115 | I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); |
1115 | I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); |
1116 | 1116 | ||
1117 | /* |
1117 | /* |
1118 | * If the panel wasn't on, delay before accessing aux channel |
1118 | * If the panel wasn't on, delay before accessing aux channel |
1119 | */ |
1119 | */ |
1120 | if (!ironlake_edp_have_panel_power(intel_dp)) { |
1120 | if (!ironlake_edp_have_panel_power(intel_dp)) { |
1121 | DRM_DEBUG_KMS("eDP was not running\n"); |
1121 | DRM_DEBUG_KMS("eDP was not running\n"); |
1122 | msleep(intel_dp->panel_power_up_delay); |
1122 | msleep(intel_dp->panel_power_up_delay); |
1123 | } |
1123 | } |
1124 | } |
1124 | } |
1125 | 1125 | ||
1126 | static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) |
1126 | static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) |
1127 | { |
1127 | { |
1128 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1128 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1129 | struct drm_i915_private *dev_priv = dev->dev_private; |
1129 | struct drm_i915_private *dev_priv = dev->dev_private; |
1130 | u32 pp; |
1130 | u32 pp; |
1131 | 1131 | ||
1132 | WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); |
1132 | WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); |
1133 | 1133 | ||
1134 | if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { |
1134 | if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { |
1135 | pp = ironlake_get_pp_control(dev_priv); |
1135 | pp = ironlake_get_pp_control(dev_priv); |
1136 | pp &= ~EDP_FORCE_VDD; |
1136 | pp &= ~EDP_FORCE_VDD; |
1137 | I915_WRITE(PCH_PP_CONTROL, pp); |
1137 | I915_WRITE(PCH_PP_CONTROL, pp); |
1138 | POSTING_READ(PCH_PP_CONTROL); |
1138 | POSTING_READ(PCH_PP_CONTROL); |
1139 | 1139 | ||
1140 | /* Make sure sequencer is idle before allowing subsequent activity */ |
1140 | /* Make sure sequencer is idle before allowing subsequent activity */ |
1141 | DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", |
1141 | DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", |
1142 | I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); |
1142 | I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); |
1143 | 1143 | ||
1144 | msleep(intel_dp->panel_power_down_delay); |
1144 | msleep(intel_dp->panel_power_down_delay); |
1145 | } |
1145 | } |
1146 | } |
1146 | } |
1147 | 1147 | ||
1148 | static void ironlake_panel_vdd_work(struct work_struct *__work) |
1148 | static void ironlake_panel_vdd_work(struct work_struct *__work) |
1149 | { |
1149 | { |
1150 | // struct intel_dp *intel_dp = container_of(to_delayed_work(__work), |
1150 | struct intel_dp *intel_dp = container_of(to_delayed_work(__work), |
1151 | // struct intel_dp, panel_vdd_work); |
1151 | struct intel_dp, panel_vdd_work); |
1152 | // struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1152 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1153 | // |
1153 | |
1154 | // mutex_lock(&dev->mode_config.mutex); |
1154 | mutex_lock(&dev->mode_config.mutex); |
1155 | // ironlake_panel_vdd_off_sync(intel_dp); |
1155 | ironlake_panel_vdd_off_sync(intel_dp); |
1156 | // mutex_unlock(&dev->mode_config.mutex); |
1156 | mutex_unlock(&dev->mode_config.mutex); |
1157 | } |
1157 | } |
1158 | 1158 | ||
1159 | void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) |
1159 | void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) |
1160 | { |
1160 | { |
1161 | if (!is_edp(intel_dp)) |
1161 | if (!is_edp(intel_dp)) |
1162 | return; |
1162 | return; |
1163 | 1163 | ||
1164 | DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); |
1164 | DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); |
1165 | WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); |
1165 | WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); |
1166 | 1166 | ||
1167 | intel_dp->want_panel_vdd = false; |
1167 | intel_dp->want_panel_vdd = false; |
1168 | 1168 | ||
1169 | if (sync) { |
1169 | if (sync) { |
1170 | ironlake_panel_vdd_off_sync(intel_dp); |
1170 | ironlake_panel_vdd_off_sync(intel_dp); |
1171 | } else { |
1171 | } else { |
1172 | /* |
1172 | /* |
1173 | * Queue the timer to fire a long |
1173 | * Queue the timer to fire a long |
1174 | * time from now (relative to the power down delay) |
1174 | * time from now (relative to the power down delay) |
1175 | * to keep the panel power up across a sequence of operations |
1175 | * to keep the panel power up across a sequence of operations |
1176 | */ |
1176 | */ |
1177 | // schedule_delayed_work(&intel_dp->panel_vdd_work, |
1177 | schedule_delayed_work(&intel_dp->panel_vdd_work, |
1178 | // msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); |
1178 | msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); |
1179 | } |
1179 | } |
1180 | } |
1180 | } |
1181 | 1181 | ||
1182 | void ironlake_edp_panel_on(struct intel_dp *intel_dp) |
1182 | void ironlake_edp_panel_on(struct intel_dp *intel_dp) |
1183 | { |
1183 | { |
1184 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1184 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1185 | struct drm_i915_private *dev_priv = dev->dev_private; |
1185 | struct drm_i915_private *dev_priv = dev->dev_private; |
1186 | u32 pp; |
1186 | u32 pp; |
1187 | 1187 | ||
1188 | if (!is_edp(intel_dp)) |
1188 | if (!is_edp(intel_dp)) |
1189 | return; |
1189 | return; |
1190 | 1190 | ||
1191 | DRM_DEBUG_KMS("Turn eDP power on\n"); |
1191 | DRM_DEBUG_KMS("Turn eDP power on\n"); |
1192 | 1192 | ||
1193 | if (ironlake_edp_have_panel_power(intel_dp)) { |
1193 | if (ironlake_edp_have_panel_power(intel_dp)) { |
1194 | DRM_DEBUG_KMS("eDP power already on\n"); |
1194 | DRM_DEBUG_KMS("eDP power already on\n"); |
1195 | return; |
1195 | return; |
1196 | } |
1196 | } |
1197 | 1197 | ||
1198 | ironlake_wait_panel_power_cycle(intel_dp); |
1198 | ironlake_wait_panel_power_cycle(intel_dp); |
1199 | 1199 | ||
1200 | pp = ironlake_get_pp_control(dev_priv); |
1200 | pp = ironlake_get_pp_control(dev_priv); |
1201 | if (IS_GEN5(dev)) { |
1201 | if (IS_GEN5(dev)) { |
1202 | /* ILK workaround: disable reset around power sequence */ |
1202 | /* ILK workaround: disable reset around power sequence */ |
1203 | pp &= ~PANEL_POWER_RESET; |
1203 | pp &= ~PANEL_POWER_RESET; |
1204 | I915_WRITE(PCH_PP_CONTROL, pp); |
1204 | I915_WRITE(PCH_PP_CONTROL, pp); |
1205 | POSTING_READ(PCH_PP_CONTROL); |
1205 | POSTING_READ(PCH_PP_CONTROL); |
1206 | } |
1206 | } |
1207 | 1207 | ||
1208 | pp |= POWER_TARGET_ON; |
1208 | pp |= POWER_TARGET_ON; |
1209 | if (!IS_GEN5(dev)) |
1209 | if (!IS_GEN5(dev)) |
1210 | pp |= PANEL_POWER_RESET; |
1210 | pp |= PANEL_POWER_RESET; |
1211 | 1211 | ||
1212 | I915_WRITE(PCH_PP_CONTROL, pp); |
1212 | I915_WRITE(PCH_PP_CONTROL, pp); |
1213 | POSTING_READ(PCH_PP_CONTROL); |
1213 | POSTING_READ(PCH_PP_CONTROL); |
1214 | 1214 | ||
1215 | ironlake_wait_panel_on(intel_dp); |
1215 | ironlake_wait_panel_on(intel_dp); |
1216 | 1216 | ||
1217 | if (IS_GEN5(dev)) { |
1217 | if (IS_GEN5(dev)) { |
1218 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
1218 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
1219 | I915_WRITE(PCH_PP_CONTROL, pp); |
1219 | I915_WRITE(PCH_PP_CONTROL, pp); |
1220 | POSTING_READ(PCH_PP_CONTROL); |
1220 | POSTING_READ(PCH_PP_CONTROL); |
1221 | } |
1221 | } |
1222 | } |
1222 | } |
1223 | 1223 | ||
1224 | void ironlake_edp_panel_off(struct intel_dp *intel_dp) |
1224 | void ironlake_edp_panel_off(struct intel_dp *intel_dp) |
1225 | { |
1225 | { |
1226 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1226 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1227 | struct drm_i915_private *dev_priv = dev->dev_private; |
1227 | struct drm_i915_private *dev_priv = dev->dev_private; |
1228 | u32 pp; |
1228 | u32 pp; |
1229 | 1229 | ||
1230 | if (!is_edp(intel_dp)) |
1230 | if (!is_edp(intel_dp)) |
1231 | return; |
1231 | return; |
1232 | 1232 | ||
1233 | DRM_DEBUG_KMS("Turn eDP power off\n"); |
1233 | DRM_DEBUG_KMS("Turn eDP power off\n"); |
1234 | 1234 | ||
1235 | WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); |
1235 | WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); |
1236 | 1236 | ||
1237 | pp = ironlake_get_pp_control(dev_priv); |
1237 | pp = ironlake_get_pp_control(dev_priv); |
1238 | /* We need to switch off panel power _and_ force vdd, for otherwise some |
1238 | /* We need to switch off panel power _and_ force vdd, for otherwise some |
1239 | * panels get very unhappy and cease to work. */ |
1239 | * panels get very unhappy and cease to work. */ |
1240 | pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); |
1240 | pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); |
1241 | I915_WRITE(PCH_PP_CONTROL, pp); |
1241 | I915_WRITE(PCH_PP_CONTROL, pp); |
1242 | POSTING_READ(PCH_PP_CONTROL); |
1242 | POSTING_READ(PCH_PP_CONTROL); |
1243 | 1243 | ||
1244 | intel_dp->want_panel_vdd = false; |
1244 | intel_dp->want_panel_vdd = false; |
1245 | 1245 | ||
1246 | ironlake_wait_panel_off(intel_dp); |
1246 | ironlake_wait_panel_off(intel_dp); |
1247 | } |
1247 | } |
1248 | 1248 | ||
1249 | void ironlake_edp_backlight_on(struct intel_dp *intel_dp) |
1249 | void ironlake_edp_backlight_on(struct intel_dp *intel_dp) |
1250 | { |
1250 | { |
1251 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1251 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1252 | struct drm_device *dev = intel_dig_port->base.base.dev; |
1252 | struct drm_device *dev = intel_dig_port->base.base.dev; |
1253 | struct drm_i915_private *dev_priv = dev->dev_private; |
1253 | struct drm_i915_private *dev_priv = dev->dev_private; |
1254 | int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe; |
1254 | int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe; |
1255 | u32 pp; |
1255 | u32 pp; |
1256 | 1256 | ||
1257 | if (!is_edp(intel_dp)) |
1257 | if (!is_edp(intel_dp)) |
1258 | return; |
1258 | return; |
1259 | 1259 | ||
1260 | DRM_DEBUG_KMS("\n"); |
1260 | DRM_DEBUG_KMS("\n"); |
1261 | /* |
1261 | /* |
1262 | * If we enable the backlight right away following a panel power |
1262 | * If we enable the backlight right away following a panel power |
1263 | * on, we may see slight flicker as the panel syncs with the eDP |
1263 | * on, we may see slight flicker as the panel syncs with the eDP |
1264 | * link. So delay a bit to make sure the image is solid before |
1264 | * link. So delay a bit to make sure the image is solid before |
1265 | * allowing it to appear. |
1265 | * allowing it to appear. |
1266 | */ |
1266 | */ |
1267 | msleep(intel_dp->backlight_on_delay); |
1267 | msleep(intel_dp->backlight_on_delay); |
1268 | pp = ironlake_get_pp_control(dev_priv); |
1268 | pp = ironlake_get_pp_control(dev_priv); |
1269 | pp |= EDP_BLC_ENABLE; |
1269 | pp |= EDP_BLC_ENABLE; |
1270 | I915_WRITE(PCH_PP_CONTROL, pp); |
1270 | I915_WRITE(PCH_PP_CONTROL, pp); |
1271 | POSTING_READ(PCH_PP_CONTROL); |
1271 | POSTING_READ(PCH_PP_CONTROL); |
1272 | 1272 | ||
1273 | intel_panel_enable_backlight(dev, pipe); |
1273 | intel_panel_enable_backlight(dev, pipe); |
1274 | } |
1274 | } |
1275 | 1275 | ||
1276 | void ironlake_edp_backlight_off(struct intel_dp *intel_dp) |
1276 | void ironlake_edp_backlight_off(struct intel_dp *intel_dp) |
1277 | { |
1277 | { |
1278 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1278 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1279 | struct drm_i915_private *dev_priv = dev->dev_private; |
1279 | struct drm_i915_private *dev_priv = dev->dev_private; |
1280 | u32 pp; |
1280 | u32 pp; |
1281 | 1281 | ||
1282 | if (!is_edp(intel_dp)) |
1282 | if (!is_edp(intel_dp)) |
1283 | return; |
1283 | return; |
1284 | 1284 | ||
1285 | intel_panel_disable_backlight(dev); |
1285 | intel_panel_disable_backlight(dev); |
1286 | 1286 | ||
1287 | DRM_DEBUG_KMS("\n"); |
1287 | DRM_DEBUG_KMS("\n"); |
1288 | pp = ironlake_get_pp_control(dev_priv); |
1288 | pp = ironlake_get_pp_control(dev_priv); |
1289 | pp &= ~EDP_BLC_ENABLE; |
1289 | pp &= ~EDP_BLC_ENABLE; |
1290 | I915_WRITE(PCH_PP_CONTROL, pp); |
1290 | I915_WRITE(PCH_PP_CONTROL, pp); |
1291 | POSTING_READ(PCH_PP_CONTROL); |
1291 | POSTING_READ(PCH_PP_CONTROL); |
1292 | msleep(intel_dp->backlight_off_delay); |
1292 | msleep(intel_dp->backlight_off_delay); |
1293 | } |
1293 | } |
1294 | 1294 | ||
1295 | static void ironlake_edp_pll_on(struct intel_dp *intel_dp) |
1295 | static void ironlake_edp_pll_on(struct intel_dp *intel_dp) |
1296 | { |
1296 | { |
1297 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1297 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1298 | struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
1298 | struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
1299 | struct drm_device *dev = crtc->dev; |
1299 | struct drm_device *dev = crtc->dev; |
1300 | struct drm_i915_private *dev_priv = dev->dev_private; |
1300 | struct drm_i915_private *dev_priv = dev->dev_private; |
1301 | u32 dpa_ctl; |
1301 | u32 dpa_ctl; |
1302 | 1302 | ||
1303 | assert_pipe_disabled(dev_priv, |
1303 | assert_pipe_disabled(dev_priv, |
1304 | to_intel_crtc(crtc)->pipe); |
1304 | to_intel_crtc(crtc)->pipe); |
1305 | 1305 | ||
1306 | DRM_DEBUG_KMS("\n"); |
1306 | DRM_DEBUG_KMS("\n"); |
1307 | dpa_ctl = I915_READ(DP_A); |
1307 | dpa_ctl = I915_READ(DP_A); |
1308 | WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n"); |
1308 | WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n"); |
1309 | WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); |
1309 | WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); |
1310 | 1310 | ||
1311 | /* We don't adjust intel_dp->DP while tearing down the link, to |
1311 | /* We don't adjust intel_dp->DP while tearing down the link, to |
1312 | * facilitate link retraining (e.g. after hotplug). Hence clear all |
1312 | * facilitate link retraining (e.g. after hotplug). Hence clear all |
1313 | * enable bits here to ensure that we don't enable too much. */ |
1313 | * enable bits here to ensure that we don't enable too much. */ |
1314 | intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); |
1314 | intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); |
1315 | intel_dp->DP |= DP_PLL_ENABLE; |
1315 | intel_dp->DP |= DP_PLL_ENABLE; |
1316 | I915_WRITE(DP_A, intel_dp->DP); |
1316 | I915_WRITE(DP_A, intel_dp->DP); |
1317 | POSTING_READ(DP_A); |
1317 | POSTING_READ(DP_A); |
1318 | udelay(200); |
1318 | udelay(200); |
1319 | } |
1319 | } |
1320 | 1320 | ||
1321 | static void ironlake_edp_pll_off(struct intel_dp *intel_dp) |
1321 | static void ironlake_edp_pll_off(struct intel_dp *intel_dp) |
1322 | { |
1322 | { |
1323 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1323 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1324 | struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
1324 | struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
1325 | struct drm_device *dev = crtc->dev; |
1325 | struct drm_device *dev = crtc->dev; |
1326 | struct drm_i915_private *dev_priv = dev->dev_private; |
1326 | struct drm_i915_private *dev_priv = dev->dev_private; |
1327 | u32 dpa_ctl; |
1327 | u32 dpa_ctl; |
1328 | 1328 | ||
1329 | assert_pipe_disabled(dev_priv, |
1329 | assert_pipe_disabled(dev_priv, |
1330 | to_intel_crtc(crtc)->pipe); |
1330 | to_intel_crtc(crtc)->pipe); |
1331 | 1331 | ||
1332 | dpa_ctl = I915_READ(DP_A); |
1332 | dpa_ctl = I915_READ(DP_A); |
1333 | WARN((dpa_ctl & DP_PLL_ENABLE) == 0, |
1333 | WARN((dpa_ctl & DP_PLL_ENABLE) == 0, |
1334 | "dp pll off, should be on\n"); |
1334 | "dp pll off, should be on\n"); |
1335 | WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); |
1335 | WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n"); |
1336 | 1336 | ||
1337 | /* We can't rely on the value tracked for the DP register in |
1337 | /* We can't rely on the value tracked for the DP register in |
1338 | * intel_dp->DP because link_down must not change that (otherwise link |
1338 | * intel_dp->DP because link_down must not change that (otherwise link |
1339 | * re-training will fail. */ |
1339 | * re-training will fail. */ |
1340 | dpa_ctl &= ~DP_PLL_ENABLE; |
1340 | dpa_ctl &= ~DP_PLL_ENABLE; |
1341 | I915_WRITE(DP_A, dpa_ctl); |
1341 | I915_WRITE(DP_A, dpa_ctl); |
1342 | POSTING_READ(DP_A); |
1342 | POSTING_READ(DP_A); |
1343 | udelay(200); |
1343 | udelay(200); |
1344 | } |
1344 | } |
1345 | 1345 | ||
1346 | /* If the sink supports it, try to set the power state appropriately */ |
1346 | /* If the sink supports it, try to set the power state appropriately */ |
1347 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) |
1347 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) |
1348 | { |
1348 | { |
1349 | int ret, i; |
1349 | int ret, i; |
1350 | 1350 | ||
1351 | /* Should have a valid DPCD by this point */ |
1351 | /* Should have a valid DPCD by this point */ |
1352 | if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) |
1352 | if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) |
1353 | return; |
1353 | return; |
1354 | 1354 | ||
1355 | if (mode != DRM_MODE_DPMS_ON) { |
1355 | if (mode != DRM_MODE_DPMS_ON) { |
1356 | ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, |
1356 | ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER, |
1357 | DP_SET_POWER_D3); |
1357 | DP_SET_POWER_D3); |
1358 | if (ret != 1) |
1358 | if (ret != 1) |
1359 | DRM_DEBUG_DRIVER("failed to write sink power state\n"); |
1359 | DRM_DEBUG_DRIVER("failed to write sink power state\n"); |
1360 | } else { |
1360 | } else { |
1361 | /* |
1361 | /* |
1362 | * When turning on, we need to retry for 1ms to give the sink |
1362 | * When turning on, we need to retry for 1ms to give the sink |
1363 | * time to wake up. |
1363 | * time to wake up. |
1364 | */ |
1364 | */ |
1365 | for (i = 0; i < 3; i++) { |
1365 | for (i = 0; i < 3; i++) { |
1366 | ret = intel_dp_aux_native_write_1(intel_dp, |
1366 | ret = intel_dp_aux_native_write_1(intel_dp, |
1367 | DP_SET_POWER, |
1367 | DP_SET_POWER, |
1368 | DP_SET_POWER_D0); |
1368 | DP_SET_POWER_D0); |
1369 | if (ret == 1) |
1369 | if (ret == 1) |
1370 | break; |
1370 | break; |
1371 | msleep(1); |
1371 | msleep(1); |
1372 | } |
1372 | } |
1373 | } |
1373 | } |
1374 | } |
1374 | } |
1375 | 1375 | ||
1376 | static bool intel_dp_get_hw_state(struct intel_encoder *encoder, |
1376 | static bool intel_dp_get_hw_state(struct intel_encoder *encoder, |
1377 | enum pipe *pipe) |
1377 | enum pipe *pipe) |
1378 | { |
1378 | { |
1379 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1379 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1380 | struct drm_device *dev = encoder->base.dev; |
1380 | struct drm_device *dev = encoder->base.dev; |
1381 | struct drm_i915_private *dev_priv = dev->dev_private; |
1381 | struct drm_i915_private *dev_priv = dev->dev_private; |
1382 | u32 tmp = I915_READ(intel_dp->output_reg); |
1382 | u32 tmp = I915_READ(intel_dp->output_reg); |
1383 | 1383 | ||
1384 | if (!(tmp & DP_PORT_EN)) |
1384 | if (!(tmp & DP_PORT_EN)) |
1385 | return false; |
1385 | return false; |
1386 | 1386 | ||
1387 | if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { |
1387 | if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { |
1388 | *pipe = PORT_TO_PIPE_CPT(tmp); |
1388 | *pipe = PORT_TO_PIPE_CPT(tmp); |
1389 | } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { |
1389 | } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { |
1390 | *pipe = PORT_TO_PIPE(tmp); |
1390 | *pipe = PORT_TO_PIPE(tmp); |
1391 | } else { |
1391 | } else { |
1392 | u32 trans_sel; |
1392 | u32 trans_sel; |
1393 | u32 trans_dp; |
1393 | u32 trans_dp; |
1394 | int i; |
1394 | int i; |
1395 | 1395 | ||
1396 | switch (intel_dp->output_reg) { |
1396 | switch (intel_dp->output_reg) { |
1397 | case PCH_DP_B: |
1397 | case PCH_DP_B: |
1398 | trans_sel = TRANS_DP_PORT_SEL_B; |
1398 | trans_sel = TRANS_DP_PORT_SEL_B; |
1399 | break; |
1399 | break; |
1400 | case PCH_DP_C: |
1400 | case PCH_DP_C: |
1401 | trans_sel = TRANS_DP_PORT_SEL_C; |
1401 | trans_sel = TRANS_DP_PORT_SEL_C; |
1402 | break; |
1402 | break; |
1403 | case PCH_DP_D: |
1403 | case PCH_DP_D: |
1404 | trans_sel = TRANS_DP_PORT_SEL_D; |
1404 | trans_sel = TRANS_DP_PORT_SEL_D; |
1405 | break; |
1405 | break; |
1406 | default: |
1406 | default: |
1407 | return true; |
1407 | return true; |
1408 | } |
1408 | } |
1409 | 1409 | ||
1410 | for_each_pipe(i) { |
1410 | for_each_pipe(i) { |
1411 | trans_dp = I915_READ(TRANS_DP_CTL(i)); |
1411 | trans_dp = I915_READ(TRANS_DP_CTL(i)); |
1412 | if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { |
1412 | if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { |
1413 | *pipe = i; |
1413 | *pipe = i; |
1414 | return true; |
1414 | return true; |
1415 | } |
1415 | } |
1416 | } |
1416 | } |
1417 | 1417 | ||
1418 | DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", |
1418 | DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", |
1419 | intel_dp->output_reg); |
1419 | intel_dp->output_reg); |
1420 | } |
1420 | } |
1421 | 1421 | ||
1422 | return true; |
1422 | return true; |
1423 | } |
1423 | } |
1424 | 1424 | ||
/*
 * Disable the DP output: backlight off, panel off, and (for PCH ports)
 * take the link down.  Called while the pipe is still running.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	/* NOTE(review): DPMS_ON in a disable path looks surprising — presumably
	 * this keeps the sink awake for the AUX traffic done while powering the
	 * panel off; confirm against the sink-dpms helper. */
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled,
	 * so that case is deferred to intel_post_disable_dp(). */
	if (!is_cpu_edp(intel_dp))
		intel_dp_link_down(intel_dp);
}
1440 | 1440 | ||
1441 | static void intel_post_disable_dp(struct intel_encoder *encoder) |
1441 | static void intel_post_disable_dp(struct intel_encoder *encoder) |
1442 | { |
1442 | { |
1443 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1443 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1444 | 1444 | ||
1445 | if (is_cpu_edp(intel_dp)) { |
1445 | if (is_cpu_edp(intel_dp)) { |
1446 | intel_dp_link_down(intel_dp); |
1446 | intel_dp_link_down(intel_dp); |
1447 | ironlake_edp_pll_off(intel_dp); |
1447 | ironlake_edp_pll_off(intel_dp); |
1448 | } |
1448 | } |
1449 | } |
1449 | } |
1450 | 1450 | ||
/*
 * Enable the DP output: wake the sink, train the link, and bring the
 * eDP panel and backlight up in panel-power-sequencer order.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	/* The port must be off at this point; bail loudly if it isn't. */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	/* VDD must be forced on so AUX transactions work before the panel
	 * is fully powered; it is released again after the panel is on. */
	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	/* 'true' = sync the delayed vdd-off work immediately. */
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);
}
1469 | 1469 | ||
1470 | static void intel_pre_enable_dp(struct intel_encoder *encoder) |
1470 | static void intel_pre_enable_dp(struct intel_encoder *encoder) |
1471 | { |
1471 | { |
1472 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1472 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1473 | 1473 | ||
1474 | if (is_cpu_edp(intel_dp)) |
1474 | if (is_cpu_edp(intel_dp)) |
1475 | ironlake_edp_pll_on(intel_dp); |
1475 | ironlake_edp_pll_on(intel_dp); |
1476 | } |
1476 | } |
1477 | 1477 | ||
1478 | /* |
1478 | /* |
1479 | * Native read with retry for link status and receiver capability reads for |
1479 | * Native read with retry for link status and receiver capability reads for |
1480 | * cases where the sink may still be asleep. |
1480 | * cases where the sink may still be asleep. |
1481 | */ |
1481 | */ |
1482 | static bool |
1482 | static bool |
1483 | intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, |
1483 | intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, |
1484 | uint8_t *recv, int recv_bytes) |
1484 | uint8_t *recv, int recv_bytes) |
1485 | { |
1485 | { |
1486 | int ret, i; |
1486 | int ret, i; |
1487 | 1487 | ||
1488 | /* |
1488 | /* |
1489 | * Sinks are *supposed* to come up within 1ms from an off state, |
1489 | * Sinks are *supposed* to come up within 1ms from an off state, |
1490 | * but we're also supposed to retry 3 times per the spec. |
1490 | * but we're also supposed to retry 3 times per the spec. |
1491 | */ |
1491 | */ |
1492 | for (i = 0; i < 3; i++) { |
1492 | for (i = 0; i < 3; i++) { |
1493 | ret = intel_dp_aux_native_read(intel_dp, address, recv, |
1493 | ret = intel_dp_aux_native_read(intel_dp, address, recv, |
1494 | recv_bytes); |
1494 | recv_bytes); |
1495 | if (ret == recv_bytes) |
1495 | if (ret == recv_bytes) |
1496 | return true; |
1496 | return true; |
1497 | msleep(1); |
1497 | msleep(1); |
1498 | } |
1498 | } |
1499 | 1499 | ||
1500 | return false; |
1500 | return false; |
1501 | } |
1501 | } |
1502 | 1502 | ||
1503 | /* |
1503 | /* |
1504 | * Fetch AUX CH registers 0x202 - 0x207 which contain |
1504 | * Fetch AUX CH registers 0x202 - 0x207 which contain |
1505 | * link status information |
1505 | * link status information |
1506 | */ |
1506 | */ |
1507 | static bool |
1507 | static bool |
1508 | intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) |
1508 | intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) |
1509 | { |
1509 | { |
1510 | return intel_dp_aux_native_read_retry(intel_dp, |
1510 | return intel_dp_aux_native_read_retry(intel_dp, |
1511 | DP_LANE0_1_STATUS, |
1511 | DP_LANE0_1_STATUS, |
1512 | link_status, |
1512 | link_status, |
1513 | DP_LINK_STATUS_SIZE); |
1513 | DP_LINK_STATUS_SIZE); |
1514 | } |
1514 | } |
1515 | 1515 | ||
#if 0
/* Human-readable names for the DP training parameters; compiled out but
 * kept around for ad-hoc debug printouts. */
static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif
1527 | 1527 | ||
1528 | /* |
1528 | /* |
1529 | * These are source-specific values; current Intel hardware supports |
1529 | * These are source-specific values; current Intel hardware supports |
1530 | * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB |
1530 | * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB |
1531 | */ |
1531 | */ |
1532 | 1532 | ||
1533 | static uint8_t |
1533 | static uint8_t |
1534 | intel_dp_voltage_max(struct intel_dp *intel_dp) |
1534 | intel_dp_voltage_max(struct intel_dp *intel_dp) |
1535 | { |
1535 | { |
1536 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1536 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
1537 | 1537 | ||
1538 | if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) |
1538 | if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) |
1539 | return DP_TRAIN_VOLTAGE_SWING_800; |
1539 | return DP_TRAIN_VOLTAGE_SWING_800; |
1540 | else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1540 | else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1541 | return DP_TRAIN_VOLTAGE_SWING_1200; |
1541 | return DP_TRAIN_VOLTAGE_SWING_1200; |
1542 | else |
1542 | else |
1543 | return DP_TRAIN_VOLTAGE_SWING_800; |
1543 | return DP_TRAIN_VOLTAGE_SWING_800; |
1544 | } |
1544 | } |
1545 | 1545 | ||
/*
 * Maximum pre-emphasis this source supports at a given voltage swing.
 * The tables differ per platform; unsupported swings fall back to 0dB.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_HASWELL(dev)) {
		/* Haswell DDI buffer translation table. */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_9_5;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		/* Ivybridge CPU eDP. */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else {
		/* All remaining platforms/ports. */
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	}
}
1587 | 1587 | ||
1588 | static void |
1588 | static void |
1589 | intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) |
1589 | intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) |
1590 | { |
1590 | { |
1591 | uint8_t v = 0; |
1591 | uint8_t v = 0; |
1592 | uint8_t p = 0; |
1592 | uint8_t p = 0; |
1593 | int lane; |
1593 | int lane; |
1594 | uint8_t voltage_max; |
1594 | uint8_t voltage_max; |
1595 | uint8_t preemph_max; |
1595 | uint8_t preemph_max; |
1596 | 1596 | ||
1597 | for (lane = 0; lane < intel_dp->lane_count; lane++) { |
1597 | for (lane = 0; lane < intel_dp->lane_count; lane++) { |
1598 | uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); |
1598 | uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); |
1599 | uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); |
1599 | uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); |
1600 | 1600 | ||
1601 | if (this_v > v) |
1601 | if (this_v > v) |
1602 | v = this_v; |
1602 | v = this_v; |
1603 | if (this_p > p) |
1603 | if (this_p > p) |
1604 | p = this_p; |
1604 | p = this_p; |
1605 | } |
1605 | } |
1606 | 1606 | ||
1607 | voltage_max = intel_dp_voltage_max(intel_dp); |
1607 | voltage_max = intel_dp_voltage_max(intel_dp); |
1608 | if (v >= voltage_max) |
1608 | if (v >= voltage_max) |
1609 | v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; |
1609 | v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; |
1610 | 1610 | ||
1611 | preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); |
1611 | preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); |
1612 | if (p >= preemph_max) |
1612 | if (p >= preemph_max) |
1613 | p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; |
1613 | p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; |
1614 | 1614 | ||
1615 | for (lane = 0; lane < 4; lane++) |
1615 | for (lane = 0; lane < 4; lane++) |
1616 | intel_dp->train_set[lane] = v | p; |
1616 | intel_dp->train_set[lane] = v | p; |
1617 | } |
1617 | } |
1618 | 1618 | ||
1619 | static uint32_t |
1619 | static uint32_t |
1620 | intel_gen4_signal_levels(uint8_t train_set) |
1620 | intel_gen4_signal_levels(uint8_t train_set) |
1621 | { |
1621 | { |
1622 | uint32_t signal_levels = 0; |
1622 | uint32_t signal_levels = 0; |
1623 | 1623 | ||
1624 | switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { |
1624 | switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { |
1625 | case DP_TRAIN_VOLTAGE_SWING_400: |
1625 | case DP_TRAIN_VOLTAGE_SWING_400: |
1626 | default: |
1626 | default: |
1627 | signal_levels |= DP_VOLTAGE_0_4; |
1627 | signal_levels |= DP_VOLTAGE_0_4; |
1628 | break; |
1628 | break; |
1629 | case DP_TRAIN_VOLTAGE_SWING_600: |
1629 | case DP_TRAIN_VOLTAGE_SWING_600: |
1630 | signal_levels |= DP_VOLTAGE_0_6; |
1630 | signal_levels |= DP_VOLTAGE_0_6; |
1631 | break; |
1631 | break; |
1632 | case DP_TRAIN_VOLTAGE_SWING_800: |
1632 | case DP_TRAIN_VOLTAGE_SWING_800: |
1633 | signal_levels |= DP_VOLTAGE_0_8; |
1633 | signal_levels |= DP_VOLTAGE_0_8; |
1634 | break; |
1634 | break; |
1635 | case DP_TRAIN_VOLTAGE_SWING_1200: |
1635 | case DP_TRAIN_VOLTAGE_SWING_1200: |
1636 | signal_levels |= DP_VOLTAGE_1_2; |
1636 | signal_levels |= DP_VOLTAGE_1_2; |
1637 | break; |
1637 | break; |
1638 | } |
1638 | } |
1639 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { |
1639 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { |
1640 | case DP_TRAIN_PRE_EMPHASIS_0: |
1640 | case DP_TRAIN_PRE_EMPHASIS_0: |
1641 | default: |
1641 | default: |
1642 | signal_levels |= DP_PRE_EMPHASIS_0; |
1642 | signal_levels |= DP_PRE_EMPHASIS_0; |
1643 | break; |
1643 | break; |
1644 | case DP_TRAIN_PRE_EMPHASIS_3_5: |
1644 | case DP_TRAIN_PRE_EMPHASIS_3_5: |
1645 | signal_levels |= DP_PRE_EMPHASIS_3_5; |
1645 | signal_levels |= DP_PRE_EMPHASIS_3_5; |
1646 | break; |
1646 | break; |
1647 | case DP_TRAIN_PRE_EMPHASIS_6: |
1647 | case DP_TRAIN_PRE_EMPHASIS_6: |
1648 | signal_levels |= DP_PRE_EMPHASIS_6; |
1648 | signal_levels |= DP_PRE_EMPHASIS_6; |
1649 | break; |
1649 | break; |
1650 | case DP_TRAIN_PRE_EMPHASIS_9_5: |
1650 | case DP_TRAIN_PRE_EMPHASIS_9_5: |
1651 | signal_levels |= DP_PRE_EMPHASIS_9_5; |
1651 | signal_levels |= DP_PRE_EMPHASIS_9_5; |
1652 | break; |
1652 | break; |
1653 | } |
1653 | } |
1654 | return signal_levels; |
1654 | return signal_levels; |
1655 | } |
1655 | } |
1656 | 1656 | ||
/* Gen6's DP voltage swing and pre-emphasis control.
 * Several DPCD swing/pre-emphasis combinations map onto the same SNB
 * hardware level, hence the grouped case labels. */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		/* Unsupported combination: log it and fall back to the
		 * lowest level rather than failing. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
1684 | 1684 | ||
/* Gen7's DP voltage swing and pre-emphasis control.
 * Unlike gen6, each supported swing/pre-emphasis pair has its own IVB
 * hardware level. */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/* Unsupported combination: log and fall back to 500mV/0dB. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
1715 | 1715 | ||
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control.
 * Maps each supported swing/pre-emphasis pair to a DDI buffer
 * translation entry. */
static uint32_t
intel_hsw_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_400MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_400MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_400MV_6DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
		return DDI_BUF_EMP_400MV_9_5DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_600MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_600MV_3_5DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return DDI_BUF_EMP_600MV_6DB_HSW;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return DDI_BUF_EMP_800MV_0DB_HSW;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return DDI_BUF_EMP_800MV_3_5DB_HSW;
	default:
		/* Unsupported combination: log and fall back to the
		 * lowest entry. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return DDI_BUF_EMP_400MV_0DB_HSW;
	}
}
1749 | 1749 | ||
/* Properly updates "DP" with the correct signal levels.
 * Selects the platform-specific translation of train_set[0] and merges
 * the resulting bits into *DP under the matching register mask. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask;
	/* All lanes share the same level, so lane 0 is representative. */
	uint8_t train_set = intel_dp->train_set[0];

	/* NOTE: the order of this chain matters — the Haswell check must
	 * come before the gen7 one. */
	if (IS_HASWELL(dev)) {
		signal_levels = intel_hsw_signal_levels(train_set);
		mask = DDI_BUF_EMP_MASK;
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		signal_levels = intel_gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
		signal_levels = intel_gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = intel_gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	*DP = (*DP & ~mask) | signal_levels;
}
1777 | 1777 | ||
1778 | static bool |
1778 | static bool |
1779 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
1779 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
1780 | uint32_t dp_reg_value, |
1780 | uint32_t dp_reg_value, |
1781 | uint8_t dp_train_pat) |
1781 | uint8_t dp_train_pat) |
1782 | { |
1782 | { |
1783 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1783 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
1784 | struct drm_device *dev = intel_dig_port->base.base.dev; |
1784 | struct drm_device *dev = intel_dig_port->base.base.dev; |
1785 | struct drm_i915_private *dev_priv = dev->dev_private; |
1785 | struct drm_i915_private *dev_priv = dev->dev_private; |
1786 | enum port port = intel_dig_port->port; |
1786 | enum port port = intel_dig_port->port; |
1787 | int ret; |
1787 | int ret; |
1788 | uint32_t temp; |
1788 | uint32_t temp; |
1789 | 1789 | ||
1790 | if (IS_HASWELL(dev)) { |
1790 | if (IS_HASWELL(dev)) { |
1791 | temp = I915_READ(DP_TP_CTL(port)); |
1791 | temp = I915_READ(DP_TP_CTL(port)); |
1792 | 1792 | ||
1793 | if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) |
1793 | if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) |
1794 | temp |= DP_TP_CTL_SCRAMBLE_DISABLE; |
1794 | temp |= DP_TP_CTL_SCRAMBLE_DISABLE; |
1795 | else |
1795 | else |
1796 | temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; |
1796 | temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; |
1797 | 1797 | ||
1798 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; |
1798 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; |
1799 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
1799 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
1800 | case DP_TRAINING_PATTERN_DISABLE: |
1800 | case DP_TRAINING_PATTERN_DISABLE: |
1801 | 1801 | ||
1802 | if (port != PORT_A) { |
1802 | if (port != PORT_A) { |
1803 | temp |= DP_TP_CTL_LINK_TRAIN_IDLE; |
1803 | temp |= DP_TP_CTL_LINK_TRAIN_IDLE; |
1804 | I915_WRITE(DP_TP_CTL(port), temp); |
1804 | I915_WRITE(DP_TP_CTL(port), temp); |
1805 | 1805 | ||
1806 | if (wait_for((I915_READ(DP_TP_STATUS(port)) & |
1806 | if (wait_for((I915_READ(DP_TP_STATUS(port)) & |
1807 | DP_TP_STATUS_IDLE_DONE), 1)) |
1807 | DP_TP_STATUS_IDLE_DONE), 1)) |
1808 | DRM_ERROR("Timed out waiting for DP idle patterns\n"); |
1808 | DRM_ERROR("Timed out waiting for DP idle patterns\n"); |
1809 | 1809 | ||
1810 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; |
1810 | temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; |
1811 | } |
1811 | } |
1812 | 1812 | ||
1813 | temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; |
1813 | temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; |
1814 | 1814 | ||
1815 | break; |
1815 | break; |
1816 | case DP_TRAINING_PATTERN_1: |
1816 | case DP_TRAINING_PATTERN_1: |
1817 | temp |= DP_TP_CTL_LINK_TRAIN_PAT1; |
1817 | temp |= DP_TP_CTL_LINK_TRAIN_PAT1; |
1818 | break; |
1818 | break; |
1819 | case DP_TRAINING_PATTERN_2: |
1819 | case DP_TRAINING_PATTERN_2: |
1820 | temp |= DP_TP_CTL_LINK_TRAIN_PAT2; |
1820 | temp |= DP_TP_CTL_LINK_TRAIN_PAT2; |
1821 | break; |
1821 | break; |
1822 | case DP_TRAINING_PATTERN_3: |
1822 | case DP_TRAINING_PATTERN_3: |
1823 | temp |= DP_TP_CTL_LINK_TRAIN_PAT3; |
1823 | temp |= DP_TP_CTL_LINK_TRAIN_PAT3; |
1824 | break; |
1824 | break; |
1825 | } |
1825 | } |
1826 | I915_WRITE(DP_TP_CTL(port), temp); |
1826 | I915_WRITE(DP_TP_CTL(port), temp); |
1827 | 1827 | ||
1828 | } else if (HAS_PCH_CPT(dev) && |
1828 | } else if (HAS_PCH_CPT(dev) && |
1829 | (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { |
1829 | (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { |
1830 | dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; |
1830 | dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; |
1831 | 1831 | ||
1832 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
1832 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
1833 | case DP_TRAINING_PATTERN_DISABLE: |
1833 | case DP_TRAINING_PATTERN_DISABLE: |
1834 | dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; |
1834 | dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; |
1835 | break; |
1835 | break; |
1836 | case DP_TRAINING_PATTERN_1: |
1836 | case DP_TRAINING_PATTERN_1: |
1837 | dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; |
1837 | dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; |
1838 | break; |
1838 | break; |
1839 | case DP_TRAINING_PATTERN_2: |
1839 | case DP_TRAINING_PATTERN_2: |
1840 | dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; |
1840 | dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; |
1841 | break; |
1841 | break; |
1842 | case DP_TRAINING_PATTERN_3: |
1842 | case DP_TRAINING_PATTERN_3: |
1843 | DRM_ERROR("DP training pattern 3 not supported\n"); |
1843 | DRM_ERROR("DP training pattern 3 not supported\n"); |
1844 | dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; |
1844 | dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; |
1845 | break; |
1845 | break; |
1846 | } |
1846 | } |
1847 | 1847 | ||
1848 | } else { |
1848 | } else { |
1849 | dp_reg_value &= ~DP_LINK_TRAIN_MASK; |
1849 | dp_reg_value &= ~DP_LINK_TRAIN_MASK; |
1850 | 1850 | ||
1851 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
1851 | switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
1852 | case DP_TRAINING_PATTERN_DISABLE: |
1852 | case DP_TRAINING_PATTERN_DISABLE: |
1853 | dp_reg_value |= DP_LINK_TRAIN_OFF; |
1853 | dp_reg_value |= DP_LINK_TRAIN_OFF; |
1854 | break; |
1854 | break; |
1855 | case DP_TRAINING_PATTERN_1: |
1855 | case DP_TRAINING_PATTERN_1: |
1856 | dp_reg_value |= DP_LINK_TRAIN_PAT_1; |
1856 | dp_reg_value |= DP_LINK_TRAIN_PAT_1; |
1857 | break; |
1857 | break; |
1858 | case DP_TRAINING_PATTERN_2: |
1858 | case DP_TRAINING_PATTERN_2: |
1859 | dp_reg_value |= DP_LINK_TRAIN_PAT_2; |
1859 | dp_reg_value |= DP_LINK_TRAIN_PAT_2; |
1860 | break; |
1860 | break; |
1861 | case DP_TRAINING_PATTERN_3: |
1861 | case DP_TRAINING_PATTERN_3: |
1862 | DRM_ERROR("DP training pattern 3 not supported\n"); |
1862 | DRM_ERROR("DP training pattern 3 not supported\n"); |
1863 | dp_reg_value |= DP_LINK_TRAIN_PAT_2; |
1863 | dp_reg_value |= DP_LINK_TRAIN_PAT_2; |
1864 | break; |
1864 | break; |
1865 | } |
1865 | } |
1866 | } |
1866 | } |
1867 | 1867 | ||
1868 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
1868 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
1869 | POSTING_READ(intel_dp->output_reg); |
1869 | POSTING_READ(intel_dp->output_reg); |
1870 | 1870 | ||
1871 | intel_dp_aux_native_write_1(intel_dp, |
1871 | intel_dp_aux_native_write_1(intel_dp, |
1872 | DP_TRAINING_PATTERN_SET, |
1872 | DP_TRAINING_PATTERN_SET, |
1873 | dp_train_pat); |
1873 | dp_train_pat); |
1874 | 1874 | ||
1875 | if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != |
1875 | if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != |
1876 | DP_TRAINING_PATTERN_DISABLE) { |
1876 | DP_TRAINING_PATTERN_DISABLE) { |
1877 | ret = intel_dp_aux_native_write(intel_dp, |
1877 | ret = intel_dp_aux_native_write(intel_dp, |
1878 | DP_TRAINING_LANE0_SET, |
1878 | DP_TRAINING_LANE0_SET, |
1879 | intel_dp->train_set, |
1879 | intel_dp->train_set, |
1880 | intel_dp->lane_count); |
1880 | intel_dp->lane_count); |
1881 | if (ret != intel_dp->lane_count) |
1881 | if (ret != intel_dp->lane_count) |
1882 | return false; |
1882 | return false; |
1883 | } |
1883 | } |
1884 | 1884 | ||
1885 | return true; |
1885 | return true; |
1886 | } |
1886 | } |
1887 | 1887 | ||
/* Enable corresponding port and start training pattern 1 */
/*
 * First phase of DP link training: clock recovery.
 *
 * Writes the link configuration to the sink, then loops transmitting
 * training pattern 1, adjusting voltage swing / pre-emphasis per the
 * sink's feedback until the sink reports clock recovery.  Gives up
 * after 5 retries at max swing (full restarts) or 5 tries at the same
 * voltage level.  The resulting port register value is stashed in
 * intel_dp->DP for the channel-equalization phase.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	/* Start from zero swing / zero pre-emphasis on every lane. */
	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		intel_dp_set_signal_levels(intel_dp, &DP);

		/* Set training pattern 1 */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: restart from zero levels. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	intel_dp->DP = DP;
}
1971 | 1971 | ||
1972 | void |
1972 | void |
1973 | intel_dp_complete_link_train(struct intel_dp *intel_dp) |
1973 | intel_dp_complete_link_train(struct intel_dp *intel_dp) |
1974 | { |
1974 | { |
1975 | bool channel_eq = false; |
1975 | bool channel_eq = false; |
1976 | int tries, cr_tries; |
1976 | int tries, cr_tries; |
1977 | uint32_t DP = intel_dp->DP; |
1977 | uint32_t DP = intel_dp->DP; |
1978 | 1978 | ||
1979 | /* channel equalization */ |
1979 | /* channel equalization */ |
1980 | tries = 0; |
1980 | tries = 0; |
1981 | cr_tries = 0; |
1981 | cr_tries = 0; |
1982 | channel_eq = false; |
1982 | channel_eq = false; |
1983 | for (;;) { |
1983 | for (;;) { |
1984 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
1984 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
1985 | 1985 | ||
1986 | if (cr_tries > 5) { |
1986 | if (cr_tries > 5) { |
1987 | DRM_ERROR("failed to train DP, aborting\n"); |
1987 | DRM_ERROR("failed to train DP, aborting\n"); |
1988 | intel_dp_link_down(intel_dp); |
1988 | intel_dp_link_down(intel_dp); |
1989 | break; |
1989 | break; |
1990 | } |
1990 | } |
1991 | 1991 | ||
1992 | intel_dp_set_signal_levels(intel_dp, &DP); |
1992 | intel_dp_set_signal_levels(intel_dp, &DP); |
1993 | 1993 | ||
1994 | /* channel eq pattern */ |
1994 | /* channel eq pattern */ |
1995 | if (!intel_dp_set_link_train(intel_dp, DP, |
1995 | if (!intel_dp_set_link_train(intel_dp, DP, |
1996 | DP_TRAINING_PATTERN_2 | |
1996 | DP_TRAINING_PATTERN_2 | |
1997 | DP_LINK_SCRAMBLING_DISABLE)) |
1997 | DP_LINK_SCRAMBLING_DISABLE)) |
1998 | break; |
1998 | break; |
1999 | 1999 | ||
2000 | drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); |
2000 | drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); |
2001 | if (!intel_dp_get_link_status(intel_dp, link_status)) |
2001 | if (!intel_dp_get_link_status(intel_dp, link_status)) |
2002 | break; |
2002 | break; |
2003 | 2003 | ||
2004 | /* Make sure clock is still ok */ |
2004 | /* Make sure clock is still ok */ |
2005 | if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { |
2005 | if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { |
2006 | intel_dp_start_link_train(intel_dp); |
2006 | intel_dp_start_link_train(intel_dp); |
2007 | cr_tries++; |
2007 | cr_tries++; |
2008 | continue; |
2008 | continue; |
2009 | } |
2009 | } |
2010 | 2010 | ||
2011 | if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { |
2011 | if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { |
2012 | channel_eq = true; |
2012 | channel_eq = true; |
2013 | break; |
2013 | break; |
2014 | } |
2014 | } |
2015 | 2015 | ||
2016 | /* Try 5 times, then try clock recovery if that fails */ |
2016 | /* Try 5 times, then try clock recovery if that fails */ |
2017 | if (tries > 5) { |
2017 | if (tries > 5) { |
2018 | intel_dp_link_down(intel_dp); |
2018 | intel_dp_link_down(intel_dp); |
2019 | intel_dp_start_link_train(intel_dp); |
2019 | intel_dp_start_link_train(intel_dp); |
2020 | tries = 0; |
2020 | tries = 0; |
2021 | cr_tries++; |
2021 | cr_tries++; |
2022 | continue; |
2022 | continue; |
2023 | } |
2023 | } |
2024 | 2024 | ||
2025 | /* Compute new intel_dp->train_set as requested by target */ |
2025 | /* Compute new intel_dp->train_set as requested by target */ |
2026 | intel_get_adjust_train(intel_dp, link_status); |
2026 | intel_get_adjust_train(intel_dp, link_status); |
2027 | ++tries; |
2027 | ++tries; |
2028 | } |
2028 | } |
2029 | 2029 | ||
2030 | if (channel_eq) |
2030 | if (channel_eq) |
2031 | DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); |
2031 | DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n"); |
2032 | 2032 | ||
2033 | intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); |
2033 | intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); |
2034 | } |
2034 | } |
2035 | 2035 | ||
/*
 * Take the DP link down: switch the port to the idle training pattern,
 * apply the IBX transcoder-select hardware workaround if needed, then
 * clear DP_PORT_EN and wait out the panel power-down delay.  A no-op on
 * DDI platforms, where the port may only be disabled as part of the
 * strict DDI modeset sequence.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(intel_dig_port->base.base.crtc);
	uint32_t DP = intel_dp->DP;

	/*
	 * DDI code has a strict mode set sequence and we should try to respect
	 * it, otherwise we might hang the machine in many different ways. So we
	 * really should be disabling the port only on a complete crtc_disable
	 * sequence. This function is just called under two conditions on DDI
	 * code:
	 * - Link train failed while doing crtc_enable, and on this case we
	 *   really should respect the mode set sequence and wait for a
	 *   crtc_disable.
	 * - Someone turned the monitor off and intel_dp_check_link_status
	 *   called us. We don't need to disable the whole port on this case, so
	 *   when someone turns the monitor on again,
	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
	 *   train.
	 */
	if (HAS_DDI(dev))
		return;

	/* Port already off: nothing to do (and the sequence below assumes on). */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Step 1: drop to the idle training pattern (CPT uses its own mask). */
	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	/* We don't really know why we're doing this */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (WARN_ON(crtc == NULL)) {
			/* We should never try to disable a port without a crtc
			 * attached. For paranoia keep the code around for a
			 * bit. */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Step 2: kill audio and the port enable bit itself. */
	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}
2114 | 2114 | ||
/*
 * Read the sink's DPCD receiver-capability block into intel_dp->dpcd,
 * plus the per-port downstream capabilities when the sink advertises a
 * downstream port and a DPCD revision that carries them.
 *
 * Returns false when the AUX transfer fails, no DPCD is present, or the
 * downstream-port read fails; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	/* 3 chars per byte ("xx ") for the debug hex dump below. */
	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];

	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) == 0)
		return false; /* aux transfer failed */

	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
					   intel_dp->downstream_ports,
					   DP_MAX_DOWNSTREAM_PORTS) == 0)
		return false; /* downstream port status fetch failed */

	return true;
}
2145 | 2145 | ||
2146 | static void |
2146 | static void |
2147 | intel_dp_probe_oui(struct intel_dp *intel_dp) |
2147 | intel_dp_probe_oui(struct intel_dp *intel_dp) |
2148 | { |
2148 | { |
2149 | u8 buf[3]; |
2149 | u8 buf[3]; |
2150 | 2150 | ||
2151 | if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) |
2151 | if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) |
2152 | return; |
2152 | return; |
2153 | 2153 | ||
2154 | ironlake_edp_panel_vdd_on(intel_dp); |
2154 | ironlake_edp_panel_vdd_on(intel_dp); |
2155 | 2155 | ||
2156 | if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) |
2156 | if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) |
2157 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", |
2157 | DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", |
2158 | buf[0], buf[1], buf[2]); |
2158 | buf[0], buf[1], buf[2]); |
2159 | 2159 | ||
2160 | if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) |
2160 | if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) |
2161 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", |
2161 | DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", |
2162 | buf[0], buf[1], buf[2]); |
2162 | buf[0], buf[1], buf[2]); |
2163 | 2163 | ||
2164 | ironlake_edp_panel_vdd_off(intel_dp, false); |
2164 | ironlake_edp_panel_vdd_off(intel_dp, false); |
2165 | } |
2165 | } |
2166 | 2166 | ||
2167 | static bool |
2167 | static bool |
2168 | intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) |
2168 | intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) |
2169 | { |
2169 | { |
2170 | int ret; |
2170 | int ret; |
2171 | 2171 | ||
2172 | ret = intel_dp_aux_native_read_retry(intel_dp, |
2172 | ret = intel_dp_aux_native_read_retry(intel_dp, |
2173 | DP_DEVICE_SERVICE_IRQ_VECTOR, |
2173 | DP_DEVICE_SERVICE_IRQ_VECTOR, |
2174 | sink_irq_vector, 1); |
2174 | sink_irq_vector, 1); |
2175 | if (!ret) |
2175 | if (!ret) |
2176 | return false; |
2176 | return false; |
2177 | 2177 | ||
2178 | return true; |
2178 | return true; |
2179 | } |
2179 | } |
2180 | 2180 | ||
/*
 * Respond to a sink automated-test request (DP_AUTOMATED_TEST_REQUEST).
 * No test handling is implemented, so every request is refused.
 */
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
2187 | 2187 | ||
/*
 * According to DP spec
 * 5.1.2:
 *	1. Read DPCD
 *	2. Configure link according to Receiver Capabilities
 *	3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *	4. Check link status on receipt of hot-plug interrupt
 */

/*
 * Hot-plug / IRQ handler for an active DP link: verify the link is
 * still up, service any sink IRQ vector, and retrain when channel EQ
 * has been lost.  Drops the link entirely if the sink stops answering
 * AUX (link status or DPCD unreadable).
 */
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Nothing to check while the output is inactive. */
	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Channel EQ lost: run both training phases again. */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_encoder->base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}
2243 | 2243 | ||
2244 | /* XXX this is probably wrong for multiple downstream ports */ |
2244 | /* XXX this is probably wrong for multiple downstream ports */ |
2245 | static enum drm_connector_status |
2245 | static enum drm_connector_status |
2246 | intel_dp_detect_dpcd(struct intel_dp *intel_dp) |
2246 | intel_dp_detect_dpcd(struct intel_dp *intel_dp) |
2247 | { |
2247 | { |
2248 | uint8_t *dpcd = intel_dp->dpcd; |
2248 | uint8_t *dpcd = intel_dp->dpcd; |
2249 | bool hpd; |
2249 | bool hpd; |
2250 | uint8_t type; |
2250 | uint8_t type; |
2251 | 2251 | ||
2252 | if (!intel_dp_get_dpcd(intel_dp)) |
2252 | if (!intel_dp_get_dpcd(intel_dp)) |
2253 | return connector_status_disconnected; |
2253 | return connector_status_disconnected; |
2254 | 2254 | ||
2255 | /* if there's no downstream port, we're done */ |
2255 | /* if there's no downstream port, we're done */ |
2256 | if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) |
2256 | if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) |
2257 | return connector_status_connected; |
2257 | return connector_status_connected; |
2258 | 2258 | ||
2259 | /* If we're HPD-aware, SINK_COUNT changes dynamically */ |
2259 | /* If we're HPD-aware, SINK_COUNT changes dynamically */ |
2260 | hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); |
2260 | hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD); |
2261 | if (hpd) { |
2261 | if (hpd) { |
2262 | uint8_t reg; |
2262 | uint8_t reg; |
2263 | if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, |
2263 | if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT, |
2264 | ®, 1)) |
2264 | ®, 1)) |
2265 | return connector_status_unknown; |
2265 | return connector_status_unknown; |
2266 | return DP_GET_SINK_COUNT(reg) ? connector_status_connected |
2266 | return DP_GET_SINK_COUNT(reg) ? connector_status_connected |
2267 | : connector_status_disconnected; |
2267 | : connector_status_disconnected; |
2268 | } |
2268 | } |
2269 | 2269 | ||
2270 | /* If no HPD, poke DDC gently */ |
2270 | /* If no HPD, poke DDC gently */ |
2271 | if (drm_probe_ddc(&intel_dp->adapter)) |
2271 | if (drm_probe_ddc(&intel_dp->adapter)) |
2272 | return connector_status_connected; |
2272 | return connector_status_connected; |
2273 | 2273 | ||
2274 | /* Well we tried, say unknown for unreliable port types */ |
2274 | /* Well we tried, say unknown for unreliable port types */ |
2275 | type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; |
2275 | type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; |
2276 | if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) |
2276 | if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID) |
2277 | return connector_status_unknown; |
2277 | return connector_status_unknown; |
2278 | 2278 | ||
2279 | /* Anything else is out of spec, warn and ignore */ |
2279 | /* Anything else is out of spec, warn and ignore */ |
2280 | DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); |
2280 | DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); |
2281 | return connector_status_disconnected; |
2281 | return connector_status_disconnected; |
2282 | } |
2282 | } |
2283 | 2283 | ||
2284 | static enum drm_connector_status |
2284 | static enum drm_connector_status |
2285 | ironlake_dp_detect(struct intel_dp *intel_dp) |
2285 | ironlake_dp_detect(struct intel_dp *intel_dp) |
2286 | { |
2286 | { |
2287 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
2287 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
2288 | struct drm_i915_private *dev_priv = dev->dev_private; |
2288 | struct drm_i915_private *dev_priv = dev->dev_private; |
2289 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
2289 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
2290 | enum drm_connector_status status; |
2290 | enum drm_connector_status status; |
2291 | 2291 | ||
2292 | /* Can't disconnect eDP, but you can close the lid... */ |
2292 | /* Can't disconnect eDP, but you can close the lid... */ |
2293 | if (is_edp(intel_dp)) { |
2293 | if (is_edp(intel_dp)) { |
2294 | status = intel_panel_detect(dev); |
2294 | status = intel_panel_detect(dev); |
2295 | if (status == connector_status_unknown) |
2295 | if (status == connector_status_unknown) |
2296 | status = connector_status_connected; |
2296 | status = connector_status_connected; |
2297 | return status; |
2297 | return status; |
2298 | } |
2298 | } |
2299 | 2299 | ||
2300 | if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) |
2300 | if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) |
2301 | return connector_status_disconnected; |
2301 | return connector_status_disconnected; |
2302 | 2302 | ||
2303 | return intel_dp_detect_dpcd(intel_dp); |
2303 | return intel_dp_detect_dpcd(intel_dp); |
2304 | } |
2304 | } |
2305 | 2305 | ||
2306 | static enum drm_connector_status |
2306 | static enum drm_connector_status |
2307 | g4x_dp_detect(struct intel_dp *intel_dp) |
2307 | g4x_dp_detect(struct intel_dp *intel_dp) |
2308 | { |
2308 | { |
2309 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
2309 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
2310 | struct drm_i915_private *dev_priv = dev->dev_private; |
2310 | struct drm_i915_private *dev_priv = dev->dev_private; |
2311 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
2311 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
2312 | uint32_t bit; |
2312 | uint32_t bit; |
2313 | 2313 | ||
2314 | switch (intel_dig_port->port) { |
2314 | switch (intel_dig_port->port) { |
2315 | case PORT_B: |
2315 | case PORT_B: |
2316 | bit = PORTB_HOTPLUG_LIVE_STATUS; |
2316 | bit = PORTB_HOTPLUG_LIVE_STATUS; |
2317 | break; |
2317 | break; |
2318 | case PORT_C: |
2318 | case PORT_C: |
2319 | bit = PORTC_HOTPLUG_LIVE_STATUS; |
2319 | bit = PORTC_HOTPLUG_LIVE_STATUS; |
2320 | break; |
2320 | break; |
2321 | case PORT_D: |
2321 | case PORT_D: |
2322 | bit = PORTD_HOTPLUG_LIVE_STATUS; |
2322 | bit = PORTD_HOTPLUG_LIVE_STATUS; |
2323 | break; |
2323 | break; |
2324 | default: |
2324 | default: |
2325 | return connector_status_unknown; |
2325 | return connector_status_unknown; |
2326 | } |
2326 | } |
2327 | 2327 | ||
2328 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) |
2328 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) |
2329 | return connector_status_disconnected; |
2329 | return connector_status_disconnected; |
2330 | 2330 | ||
2331 | return intel_dp_detect_dpcd(intel_dp); |
2331 | return intel_dp_detect_dpcd(intel_dp); |
2332 | } |
2332 | } |
2333 | 2333 | ||
2334 | static struct edid * |
2334 | static struct edid * |
2335 | intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) |
2335 | intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) |
2336 | { |
2336 | { |
2337 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2337 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2338 | 2338 | ||
2339 | /* use cached edid if we have one */ |
2339 | /* use cached edid if we have one */ |
2340 | if (intel_connector->edid) { |
2340 | if (intel_connector->edid) { |
2341 | struct edid *edid; |
2341 | struct edid *edid; |
2342 | int size; |
2342 | int size; |
2343 | 2343 | ||
2344 | /* invalid edid */ |
2344 | /* invalid edid */ |
2345 | if (IS_ERR(intel_connector->edid)) |
2345 | if (IS_ERR(intel_connector->edid)) |
2346 | return NULL; |
2346 | return NULL; |
2347 | 2347 | ||
2348 | size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; |
2348 | size = (intel_connector->edid->extensions + 1) * EDID_LENGTH; |
2349 | edid = kmalloc(size, GFP_KERNEL); |
2349 | edid = kmalloc(size, GFP_KERNEL); |
2350 | if (!edid) |
2350 | if (!edid) |
2351 | return NULL; |
2351 | return NULL; |
2352 | 2352 | ||
2353 | memcpy(edid, intel_connector->edid, size); |
2353 | memcpy(edid, intel_connector->edid, size); |
2354 | return edid; |
2354 | return edid; |
2355 | } |
2355 | } |
2356 | 2356 | ||
2357 | return drm_get_edid(connector, adapter); |
2357 | return drm_get_edid(connector, adapter); |
2358 | } |
2358 | } |
2359 | 2359 | ||
2360 | static int |
2360 | static int |
2361 | intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) |
2361 | intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) |
2362 | { |
2362 | { |
2363 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2363 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2364 | 2364 | ||
2365 | /* use cached edid if we have one */ |
2365 | /* use cached edid if we have one */ |
2366 | if (intel_connector->edid) { |
2366 | if (intel_connector->edid) { |
2367 | /* invalid edid */ |
2367 | /* invalid edid */ |
2368 | if (IS_ERR(intel_connector->edid)) |
2368 | if (IS_ERR(intel_connector->edid)) |
2369 | return 0; |
2369 | return 0; |
2370 | 2370 | ||
2371 | return intel_connector_update_modes(connector, |
2371 | return intel_connector_update_modes(connector, |
2372 | intel_connector->edid); |
2372 | intel_connector->edid); |
2373 | } |
2373 | } |
2374 | 2374 | ||
2375 | return intel_ddc_get_modes(connector, adapter); |
2375 | return intel_ddc_get_modes(connector, adapter); |
2376 | } |
2376 | } |
2377 | 2377 | ||
2378 | static enum drm_connector_status |
2378 | static enum drm_connector_status |
2379 | intel_dp_detect(struct drm_connector *connector, bool force) |
2379 | intel_dp_detect(struct drm_connector *connector, bool force) |
2380 | { |
2380 | { |
2381 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2381 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2382 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
2382 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
2383 | struct intel_encoder *intel_encoder = &intel_dig_port->base; |
2383 | struct intel_encoder *intel_encoder = &intel_dig_port->base; |
2384 | struct drm_device *dev = connector->dev; |
2384 | struct drm_device *dev = connector->dev; |
2385 | enum drm_connector_status status; |
2385 | enum drm_connector_status status; |
2386 | struct edid *edid = NULL; |
2386 | struct edid *edid = NULL; |
2387 | 2387 | ||
2388 | intel_dp->has_audio = false; |
2388 | intel_dp->has_audio = false; |
2389 | 2389 | ||
2390 | if (HAS_PCH_SPLIT(dev)) |
2390 | if (HAS_PCH_SPLIT(dev)) |
2391 | status = ironlake_dp_detect(intel_dp); |
2391 | status = ironlake_dp_detect(intel_dp); |
2392 | else |
2392 | else |
2393 | status = g4x_dp_detect(intel_dp); |
2393 | status = g4x_dp_detect(intel_dp); |
2394 | 2394 | ||
2395 | if (status != connector_status_connected) |
2395 | if (status != connector_status_connected) |
2396 | return status; |
2396 | return status; |
2397 | 2397 | ||
2398 | intel_dp_probe_oui(intel_dp); |
2398 | intel_dp_probe_oui(intel_dp); |
2399 | 2399 | ||
2400 | if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { |
2400 | if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { |
2401 | intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); |
2401 | intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); |
2402 | } else { |
2402 | } else { |
2403 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
2403 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
2404 | if (edid) { |
2404 | if (edid) { |
2405 | intel_dp->has_audio = drm_detect_monitor_audio(edid); |
2405 | intel_dp->has_audio = drm_detect_monitor_audio(edid); |
2406 | kfree(edid); |
2406 | kfree(edid); |
2407 | } |
2407 | } |
2408 | } |
2408 | } |
2409 | 2409 | ||
2410 | if (intel_encoder->type != INTEL_OUTPUT_EDP) |
2410 | if (intel_encoder->type != INTEL_OUTPUT_EDP) |
2411 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
2411 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
2412 | return connector_status_connected; |
2412 | return connector_status_connected; |
2413 | } |
2413 | } |
2414 | 2414 | ||
2415 | static int intel_dp_get_modes(struct drm_connector *connector) |
2415 | static int intel_dp_get_modes(struct drm_connector *connector) |
2416 | { |
2416 | { |
2417 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2417 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2418 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2418 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2419 | struct drm_device *dev = connector->dev; |
2419 | struct drm_device *dev = connector->dev; |
2420 | int ret; |
2420 | int ret; |
2421 | 2421 | ||
2422 | /* We should parse the EDID data and find out if it has an audio sink |
2422 | /* We should parse the EDID data and find out if it has an audio sink |
2423 | */ |
2423 | */ |
2424 | 2424 | ||
2425 | ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); |
2425 | ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); |
2426 | if (ret) |
2426 | if (ret) |
2427 | return ret; |
2427 | return ret; |
2428 | 2428 | ||
2429 | /* if eDP has no EDID, fall back to fixed mode */ |
2429 | /* if eDP has no EDID, fall back to fixed mode */ |
2430 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { |
2430 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { |
2431 | struct drm_display_mode *mode; |
2431 | struct drm_display_mode *mode; |
2432 | mode = drm_mode_duplicate(dev, |
2432 | mode = drm_mode_duplicate(dev, |
2433 | intel_connector->panel.fixed_mode); |
2433 | intel_connector->panel.fixed_mode); |
2434 | if (mode) { |
2434 | if (mode) { |
2435 | drm_mode_probed_add(connector, mode); |
2435 | drm_mode_probed_add(connector, mode); |
2436 | return 1; |
2436 | return 1; |
2437 | } |
2437 | } |
2438 | } |
2438 | } |
2439 | return 0; |
2439 | return 0; |
2440 | } |
2440 | } |
2441 | 2441 | ||
2442 | static bool |
2442 | static bool |
2443 | intel_dp_detect_audio(struct drm_connector *connector) |
2443 | intel_dp_detect_audio(struct drm_connector *connector) |
2444 | { |
2444 | { |
2445 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2445 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2446 | struct edid *edid; |
2446 | struct edid *edid; |
2447 | bool has_audio = false; |
2447 | bool has_audio = false; |
2448 | 2448 | ||
2449 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
2449 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
2450 | if (edid) { |
2450 | if (edid) { |
2451 | has_audio = drm_detect_monitor_audio(edid); |
2451 | has_audio = drm_detect_monitor_audio(edid); |
2452 | kfree(edid); |
2452 | kfree(edid); |
2453 | } |
2453 | } |
2454 | 2454 | ||
2455 | return has_audio; |
2455 | return has_audio; |
2456 | } |
2456 | } |
2457 | 2457 | ||
/*
 * ->set_property() hook for DP connectors.
 *
 * Supported properties:
 *   - force_audio:    override or auto-detect sink audio capability;
 *   - broadcast RGB:  automatic, full, or limited (16-235) color range;
 *   - scaling mode:   eDP-only panel fitter behavior (NONE rejected).
 *
 * Any accepted change falls through to "done", which restores the mode
 * on the encoder's CRTC (if active) so the hardware picks it up.
 *
 * Returns 0 on success, -EINVAL for an unknown property or value, or
 * the error from storing the property value.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Persist the value on the DRM object before acting on it. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		/* No state change: nothing to do, no modeset needed. */
		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* Effective audio state unchanged: skip the restore. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}
		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		/* The panel fitter cannot be bypassed on eDP here. */
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Re-program the pipe so the new property takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
2537 | 2537 | ||
/*
 * ->destroy() hook for DP connectors.
 *
 * Frees the cached EDID (unless it is a PTR_ERR sentinel), tears down
 * eDP panel/backlight state, then unregisters and frees the connector.
 * Teardown order matters: sysfs removal and DRM connector cleanup must
 * precede the final kfree().
 */
static void
intel_dp_destroy(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* The cached EDID may be a PTR_ERR marker; only free real data. */
	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	if (is_edp(intel_dp)) {
		intel_panel_destroy_backlight(dev);
		intel_panel_fini(&intel_connector->panel);
	}

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
2557 | 2557 | ||
/*
 * ->destroy() hook for the DP encoder.
 *
 * Removes the AUX-channel i2c adapter and the encoder, then forces eDP
 * panel VDD off before the digital port structure is freed.  The
 * VDD-off call is made under the mode_config mutex.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	i2c_del_adapter(&intel_dp->adapter);
	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		/* NOTE(review): the delayed VDD-off work is not cancelled
		 * in this port (call below is commented out) — confirm
		 * there is no window where panel_vdd_work can still run
		 * against the freed intel_dig_port. */
//		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		mutex_lock(&dev->mode_config.mutex);
		ironlake_panel_vdd_off_sync(intel_dp);
		mutex_unlock(&dev->mode_config.mutex);
	}
	kfree(intel_dig_port);
}
2574 | 2574 | ||
/* Encoder helper vtable: config fixup and mode programming for DP. */
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
	.mode_fixup = intel_dp_mode_fixup,
	.mode_set = intel_dp_mode_set,
};
2579 | 2579 | ||
/* Connector vtable: DPMS, detection, properties, and teardown. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_destroy,
};
2587 | 2587 | ||
/* Connector helper vtable: mode enumeration/validation and encoder pick. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
2593 | 2593 | ||
/* Encoder vtable: only destruction is DP-specific. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};
2597 | 2597 | ||
/* Hotplug handler for DP encoders: re-validate link training state. */
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	intel_dp_check_link_status(intel_dp);
}
2605 | 2605 | ||
2606 | /* Return which DP Port should be selected for Transcoder DP control */ |
2606 | /* Return which DP Port should be selected for Transcoder DP control */ |
2607 | int |
2607 | int |
2608 | intel_trans_dp_port_sel(struct drm_crtc *crtc) |
2608 | intel_trans_dp_port_sel(struct drm_crtc *crtc) |
2609 | { |
2609 | { |
2610 | struct drm_device *dev = crtc->dev; |
2610 | struct drm_device *dev = crtc->dev; |
2611 | struct intel_encoder *intel_encoder; |
2611 | struct intel_encoder *intel_encoder; |
2612 | struct intel_dp *intel_dp; |
2612 | struct intel_dp *intel_dp; |
2613 | 2613 | ||
2614 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
2614 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
2615 | intel_dp = enc_to_intel_dp(&intel_encoder->base); |
2615 | intel_dp = enc_to_intel_dp(&intel_encoder->base); |
2616 | 2616 | ||
2617 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
2617 | if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || |
2618 | intel_encoder->type == INTEL_OUTPUT_EDP) |
2618 | intel_encoder->type == INTEL_OUTPUT_EDP) |
2619 | return intel_dp->output_reg; |
2619 | return intel_dp->output_reg; |
2620 | } |
2620 | } |
2621 | 2621 | ||
2622 | return -1; |
2622 | return -1; |
2623 | } |
2623 | } |
2624 | 2624 | ||
2625 | /* check the VBT to see whether the eDP is on DP-D port */ |
2625 | /* check the VBT to see whether the eDP is on DP-D port */ |
2626 | bool intel_dpd_is_edp(struct drm_device *dev) |
2626 | bool intel_dpd_is_edp(struct drm_device *dev) |
2627 | { |
2627 | { |
2628 | struct drm_i915_private *dev_priv = dev->dev_private; |
2628 | struct drm_i915_private *dev_priv = dev->dev_private; |
2629 | struct child_device_config *p_child; |
2629 | struct child_device_config *p_child; |
2630 | int i; |
2630 | int i; |
2631 | 2631 | ||
2632 | if (!dev_priv->child_dev_num) |
2632 | if (!dev_priv->child_dev_num) |
2633 | return false; |
2633 | return false; |
2634 | 2634 | ||
2635 | for (i = 0; i < dev_priv->child_dev_num; i++) { |
2635 | for (i = 0; i < dev_priv->child_dev_num; i++) { |
2636 | p_child = dev_priv->child_dev + i; |
2636 | p_child = dev_priv->child_dev + i; |
2637 | 2637 | ||
2638 | if (p_child->dvo_port == PORT_IDPD && |
2638 | if (p_child->dvo_port == PORT_IDPD && |
2639 | p_child->device_type == DEVICE_TYPE_eDP) |
2639 | p_child->device_type == DEVICE_TYPE_eDP) |
2640 | return true; |
2640 | return true; |
2641 | } |
2641 | } |
2642 | return false; |
2642 | return false; |
2643 | } |
2643 | } |
2644 | 2644 | ||
2645 | static void |
2645 | static void |
2646 | intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) |
2646 | intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) |
2647 | { |
2647 | { |
2648 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2648 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2649 | 2649 | ||
2650 | intel_attach_force_audio_property(connector); |
2650 | intel_attach_force_audio_property(connector); |
2651 | intel_attach_broadcast_rgb_property(connector); |
2651 | intel_attach_broadcast_rgb_property(connector); |
2652 | intel_dp->color_range_auto = true; |
2652 | intel_dp->color_range_auto = true; |
2653 | 2653 | ||
2654 | if (is_edp(intel_dp)) { |
2654 | if (is_edp(intel_dp)) { |
2655 | drm_mode_create_scaling_mode_property(connector->dev); |
2655 | drm_mode_create_scaling_mode_property(connector->dev); |
2656 | drm_object_attach_property( |
2656 | drm_object_attach_property( |
2657 | &connector->base, |
2657 | &connector->base, |
2658 | connector->dev->mode_config.scaling_mode_property, |
2658 | connector->dev->mode_config.scaling_mode_property, |
2659 | DRM_MODE_SCALE_ASPECT); |
2659 | DRM_MODE_SCALE_ASPECT); |
2660 | intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; |
2660 | intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; |
2661 | } |
2661 | } |
2662 | } |
2662 | } |
2663 | 2663 | ||
/*
 * Work out the eDP panel power-sequencing delays.
 *
 * Three candidate sources are merged: the values currently programmed
 * in the PCH power-sequencer registers, the VBT-provided values, and
 * the eDP 1.3 spec upper limits.  For each delay the max of register
 * and VBT wins; if both are zero, the spec limit is used.  The chosen
 * delays are cached on @intel_dp (converted from 100us units to ms)
 * and optionally returned via @out for later register programming.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(dev_priv);
	I915_WRITE(PCH_PP_CONTROL, pp);

	pp_on = I915_READ(PCH_PP_ON_DELAYS);
	pp_off = I915_READ(PCH_PP_OFF_DELAYS);
	pp_div = I915_READ(PCH_PP_DIVISOR);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* Register value is in 100ms units; scale to the common
	 * 100us unit used by every other field. */
	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->edp.pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100us units to the ms delays used by the
	 * panel power helpers. */
#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}
2748 | 2748 | ||
/*
 * Program the merged panel power-sequence delays from @seq into the
 * PCH power-sequencer registers, including the reference divider
 * derived from the PCH raw clock and, on IBX/CPT, the port-select
 * bits (Haswell dropped those).
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div;

	/* And finally store the new values in the power sequencer. */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
			<< PP_REFERENCE_DIVIDER_SHIFT;
	/* t11_t12 is in 100us units; register wants 100ms units. */
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
			<< PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (is_cpu_edp(intel_dp))
			pp_on |= PANEL_POWER_PORT_DP_A;
		else
			pp_on |= PANEL_POWER_PORT_DP_D;
	}

	I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
	I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
	I915_WRITE(PCH_PP_DIVISOR, pp_div);

	/* Read back so the debug output shows what actually stuck. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(PCH_PP_ON_DELAYS),
		      I915_READ(PCH_PP_OFF_DELAYS),
		      I915_READ(PCH_PP_DIVISOR));
}
2787 | 2787 | ||
/*
 * Initialize the DRM connector paired with an already-initialized DP
 * digital port encoder: classify the port as DP vs eDP, register the
 * connector, set up the DDC/AUX i2c bus, and — for eDP — read the
 * panel's power-sequencing delays, cache DPCD and EDID, and pick the
 * panel's fixed mode.
 *
 * On an eDP port whose DPCD read fails ("ghost" panel), the encoder and
 * connector are torn down again and the function returns early.
 */
void
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct edp_power_seq power_seq = { 0 };
	enum port port = intel_dig_port->port;
	const char *name = NULL;
	int type;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	/* On PCH-split platforms port D may be strapped as eDP. */
	if (HAS_PCH_SPLIT(dev) && port == PORT_D)
		if (intel_dpd_is_edp(dev))
			intel_dp->is_pch_edp = true;

	/*
	 * FIXME : We need to initialize built-in panels before external panels.
	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
	 */
	if (IS_VALLEYVIEW(dev) && port == PORT_C) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else if (port == PORT_A || is_pch_edp(intel_dp)) {
		type = DRM_MODE_CONNECTOR_eDP;
		intel_encoder->type = INTEL_OUTPUT_EDP;
	} else {
		/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
		 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
		 * rewrite it.
		 */
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	/* Deferred work used to drop panel VDD after a grace period. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  ironlake_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;


	/* Set up the DDC bus. Ports B-D also contribute hotplug bits. */
	switch (port) {
	case PORT_A:
		name = "DPDDC-A";
		break;
	case PORT_B:
		dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
		name = "DPDDC-B";
		break;
	case PORT_C:
		dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
		name = "DPDDC-C";
		break;
	case PORT_D:
		dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
		name = "DPDDC-D";
		break;
	default:
		WARN(1, "Invalid port %c\n", port_name(port));
		break;
	}

	/* Read out the power-sequencer delays before touching AUX so the
	 * VDD on/off helpers below honor the panel's timing. */
	if (is_edp(intel_dp))
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);

	intel_dp_i2c_init(intel_dp, intel_connector, name);

	/* Cache DPCD and EDID for edp. */
	if (is_edp(intel_dp)) {
		bool ret;
		struct drm_display_mode *scan;
		struct edid *edid;

		/* DPCD access requires panel VDD; paired with the off
		 * call immediately after. */
		ironlake_edp_panel_vdd_on(intel_dp);
		ret = intel_dp_get_dpcd(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, false);

		if (ret) {
			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
				dev_priv->no_aux_handshake =
					intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
					DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
		} else {
			/* if this fails, presume the device is a ghost */
			DRM_INFO("failed to retrieve link info, disabling eDP\n");
			intel_dp_encoder_destroy(&intel_encoder->base);
			intel_dp_destroy(connector);
			return;
		}

		/* We now know it's not a ghost, init power sequence regs. */
		intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
							      &power_seq);

		/* VDD held across EDID read and mode-list walk; dropped
		 * at the end of this branch. */
		ironlake_edp_panel_vdd_on(intel_dp);
		edid = drm_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			if (drm_add_edid_modes(connector, edid)) {
				drm_mode_connector_update_edid_property(connector, edid);
				drm_edid_to_eld(connector, edid);
			} else {
				/* EDID present but yielded no modes. */
				kfree(edid);
				edid = ERR_PTR(-EINVAL);
			}
		} else {
			edid = ERR_PTR(-ENOENT);
		}
		/* May store an ERR_PTR sentinel; consumers are expected
		 * to check with IS_ERR before use. */
		intel_connector->edid = edid;

		/* prefer fixed mode from EDID if available */
		list_for_each_entry(scan, &connector->probed_modes, head) {
			if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
				fixed_mode = drm_mode_duplicate(dev, scan);
				break;
			}
		}

		/* fallback to VBT if available for eDP */
		if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
			fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (fixed_mode)
				fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
		}

		ironlake_edp_panel_vdd_off(intel_dp, false);
	}

	if (is_edp(intel_dp)) {
		/* fixed_mode may still be NULL here if neither EDID nor
		 * VBT provided one; intel_panel_init receives it as-is. */
		intel_panel_init(&intel_connector->panel, fixed_mode);
		intel_panel_setup_backlight(connector);
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}
2951 | 2951 | ||
2952 | void |
2952 | void |
2953 | intel_dp_init(struct drm_device *dev, int output_reg, enum port port) |
2953 | intel_dp_init(struct drm_device *dev, int output_reg, enum port port) |
2954 | { |
2954 | { |
2955 | struct intel_digital_port *intel_dig_port; |
2955 | struct intel_digital_port *intel_dig_port; |
2956 | struct intel_encoder *intel_encoder; |
2956 | struct intel_encoder *intel_encoder; |
2957 | struct drm_encoder *encoder; |
2957 | struct drm_encoder *encoder; |
2958 | struct intel_connector *intel_connector; |
2958 | struct intel_connector *intel_connector; |
2959 | 2959 | ||
2960 | intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); |
2960 | intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); |
2961 | if (!intel_dig_port) |
2961 | if (!intel_dig_port) |
2962 | return; |
2962 | return; |
2963 | 2963 | ||
2964 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
2964 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
2965 | if (!intel_connector) { |
2965 | if (!intel_connector) { |
2966 | kfree(intel_dig_port); |
2966 | kfree(intel_dig_port); |
2967 | return; |
2967 | return; |
2968 | } |
2968 | } |
2969 | 2969 | ||
2970 | intel_encoder = &intel_dig_port->base; |
2970 | intel_encoder = &intel_dig_port->base; |
2971 | encoder = &intel_encoder->base; |
2971 | encoder = &intel_encoder->base; |
2972 | 2972 | ||
2973 | drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, |
2973 | drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, |
2974 | DRM_MODE_ENCODER_TMDS); |
2974 | DRM_MODE_ENCODER_TMDS); |
2975 | drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); |
2975 | drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); |
2976 | 2976 | ||
2977 | intel_encoder->enable = intel_enable_dp; |
2977 | intel_encoder->enable = intel_enable_dp; |
2978 | intel_encoder->pre_enable = intel_pre_enable_dp; |
2978 | intel_encoder->pre_enable = intel_pre_enable_dp; |
2979 | intel_encoder->disable = intel_disable_dp; |
2979 | intel_encoder->disable = intel_disable_dp; |
2980 | intel_encoder->post_disable = intel_post_disable_dp; |
2980 | intel_encoder->post_disable = intel_post_disable_dp; |
2981 | intel_encoder->get_hw_state = intel_dp_get_hw_state; |
2981 | intel_encoder->get_hw_state = intel_dp_get_hw_state; |
2982 | 2982 | ||
2983 | intel_dig_port->port = port; |
2983 | intel_dig_port->port = port; |
2984 | intel_dig_port->dp.output_reg = output_reg; |
2984 | intel_dig_port->dp.output_reg = output_reg; |
2985 | 2985 | ||
2986 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
2986 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
2987 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
2987 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
2988 | intel_encoder->cloneable = false; |
2988 | intel_encoder->cloneable = false; |
2989 | intel_encoder->hot_plug = intel_dp_hot_plug; |
2989 | intel_encoder->hot_plug = intel_dp_hot_plug; |
2990 | 2990 | ||
2991 | intel_dp_init_connector(intel_dig_port, intel_connector); |
2991 | intel_dp_init_connector(intel_dig_port, intel_connector); |
2992 | }><>><>><>><>><>><>><>><>><>>>>>>>>>>><>>=>=><=>=>=>>>><>><>><>>><>>><>>><>><>><>>>>>><>>> |
2992 | }><>><>><>><>><>><>><>><>><>>>>>>>>>>><>>=>=><=>=>=>>>><>><>><>>><>>><>>><>><>><>>>>>><>>> |