Rev 3480 | Rev 3746 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 3480 | Rev 3482 | ||
---|---|---|---|
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt
 */
26 | 26 | ||
27 | //#include |
27 | //#include |
28 | #include |
28 | #include |
29 | //#include |
29 | //#include |
30 | #include |
30 | #include |
31 | #include |
31 | #include |
32 | #include |
32 | #include |
33 | //#include |
33 | //#include |
34 | #include |
34 | #include |
35 | #include |
35 | #include |
36 | #include "intel_drv.h" |
36 | #include "intel_drv.h" |
37 | #include |
37 | #include |
38 | #include "i915_drv.h" |
38 | #include "i915_drv.h" |
39 | #include "i915_trace.h" |
39 | #include "i915_trace.h" |
40 | #include |
40 | #include |
41 | #include |
41 | #include |
42 | //#include |
42 | //#include |
43 | 43 | ||
44 | phys_addr_t get_bus_addr(void); |
44 | phys_addr_t get_bus_addr(void); |
45 | 45 | ||
/*
 * is_power_of_2 - test whether @n has exactly one bit set.
 *
 * Zero is not a power of two.  Marked __attribute__((const)) since the
 * result depends only on the argument.
 */
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
	/* A power of two has a single set bit, so n & (n - 1) clears it. */
	return n != 0 && (n & (n - 1)) == 0;
}
51 | 51 | ||
/* Largest value treated as an error code by the ERR_PTR convention. */
#define MAX_ERRNO 4095

/* Forward declarations for helpers defined later in this file. */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
59 | 59 | ||
/*
 * intel_clock_t - one candidate DPLL configuration.
 *
 * The n/m1/m2/p1/p2 fields are the divider values handed to the hardware;
 * dot/vco/m/p are computed from them by the clock calculation code.
 */
typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
} intel_clock_t;

/* Inclusive [min, max] range for a single divider or frequency. */
typedef struct {
	int min, max;
} intel_range_t;

/*
 * intel_p2_t - post divider selection: use p2_slow below dot_limit,
 * p2_fast at or above it.
 */
typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM 2

/*
 * struct intel_limit - per-platform PLL constraints plus the search
 * routine (find_pll) used to pick a clock inside those constraints.
 */
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t p2;
	bool (*find_pll)(const intel_limit_t *, struct drm_crtc *,
			 int, int, intel_clock_t *, intel_clock_t *);
};
89 | 89 | ||
90 | /* FDI */ |
90 | /* FDI */ |
91 | #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ |
91 | #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ |
92 | 92 | ||
93 | int |
93 | int |
94 | intel_pch_rawclk(struct drm_device *dev) |
94 | intel_pch_rawclk(struct drm_device *dev) |
95 | { |
95 | { |
96 | struct drm_i915_private *dev_priv = dev->dev_private; |
96 | struct drm_i915_private *dev_priv = dev->dev_private; |
97 | 97 | ||
98 | WARN_ON(!HAS_PCH_SPLIT(dev)); |
98 | WARN_ON(!HAS_PCH_SPLIT(dev)); |
99 | 99 | ||
100 | return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; |
100 | return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; |
101 | } |
101 | } |
102 | 102 | ||
103 | static bool |
103 | static bool |
104 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
104 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
105 | int target, int refclk, intel_clock_t *match_clock, |
105 | int target, int refclk, intel_clock_t *match_clock, |
106 | intel_clock_t *best_clock); |
106 | intel_clock_t *best_clock); |
107 | static bool |
107 | static bool |
108 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
108 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
109 | int target, int refclk, intel_clock_t *match_clock, |
109 | int target, int refclk, intel_clock_t *match_clock, |
110 | intel_clock_t *best_clock); |
110 | intel_clock_t *best_clock); |
111 | 111 | ||
112 | static bool |
112 | static bool |
113 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, |
113 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, |
114 | int target, int refclk, intel_clock_t *match_clock, |
114 | int target, int refclk, intel_clock_t *match_clock, |
115 | intel_clock_t *best_clock); |
115 | intel_clock_t *best_clock); |
116 | static bool |
116 | static bool |
117 | intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, |
117 | intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, |
118 | int target, int refclk, intel_clock_t *match_clock, |
118 | int target, int refclk, intel_clock_t *match_clock, |
119 | intel_clock_t *best_clock); |
119 | intel_clock_t *best_clock); |
120 | 120 | ||
121 | static bool |
121 | static bool |
122 | intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, |
122 | intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, |
123 | int target, int refclk, intel_clock_t *match_clock, |
123 | int target, int refclk, intel_clock_t *match_clock, |
124 | intel_clock_t *best_clock); |
124 | intel_clock_t *best_clock); |
125 | 125 | ||
126 | static inline u32 /* units of 100MHz */ |
126 | static inline u32 /* units of 100MHz */ |
127 | intel_fdi_link_freq(struct drm_device *dev) |
127 | intel_fdi_link_freq(struct drm_device *dev) |
128 | { |
128 | { |
129 | if (IS_GEN5(dev)) { |
129 | if (IS_GEN5(dev)) { |
130 | struct drm_i915_private *dev_priv = dev->dev_private; |
130 | struct drm_i915_private *dev_priv = dev->dev_private; |
131 | return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; |
131 | return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; |
132 | } else |
132 | } else |
133 | return 27; |
133 | return 27; |
134 | } |
134 | } |
135 | 135 | ||
136 | static const intel_limit_t intel_limits_i8xx_dvo = { |
136 | static const intel_limit_t intel_limits_i8xx_dvo = { |
137 | .dot = { .min = 25000, .max = 350000 }, |
137 | .dot = { .min = 25000, .max = 350000 }, |
138 | .vco = { .min = 930000, .max = 1400000 }, |
138 | .vco = { .min = 930000, .max = 1400000 }, |
139 | .n = { .min = 3, .max = 16 }, |
139 | .n = { .min = 3, .max = 16 }, |
140 | .m = { .min = 96, .max = 140 }, |
140 | .m = { .min = 96, .max = 140 }, |
141 | .m1 = { .min = 18, .max = 26 }, |
141 | .m1 = { .min = 18, .max = 26 }, |
142 | .m2 = { .min = 6, .max = 16 }, |
142 | .m2 = { .min = 6, .max = 16 }, |
143 | .p = { .min = 4, .max = 128 }, |
143 | .p = { .min = 4, .max = 128 }, |
144 | .p1 = { .min = 2, .max = 33 }, |
144 | .p1 = { .min = 2, .max = 33 }, |
145 | .p2 = { .dot_limit = 165000, |
145 | .p2 = { .dot_limit = 165000, |
146 | .p2_slow = 4, .p2_fast = 2 }, |
146 | .p2_slow = 4, .p2_fast = 2 }, |
147 | .find_pll = intel_find_best_PLL, |
147 | .find_pll = intel_find_best_PLL, |
148 | }; |
148 | }; |
149 | 149 | ||
150 | static const intel_limit_t intel_limits_i8xx_lvds = { |
150 | static const intel_limit_t intel_limits_i8xx_lvds = { |
151 | .dot = { .min = 25000, .max = 350000 }, |
151 | .dot = { .min = 25000, .max = 350000 }, |
152 | .vco = { .min = 930000, .max = 1400000 }, |
152 | .vco = { .min = 930000, .max = 1400000 }, |
153 | .n = { .min = 3, .max = 16 }, |
153 | .n = { .min = 3, .max = 16 }, |
154 | .m = { .min = 96, .max = 140 }, |
154 | .m = { .min = 96, .max = 140 }, |
155 | .m1 = { .min = 18, .max = 26 }, |
155 | .m1 = { .min = 18, .max = 26 }, |
156 | .m2 = { .min = 6, .max = 16 }, |
156 | .m2 = { .min = 6, .max = 16 }, |
157 | .p = { .min = 4, .max = 128 }, |
157 | .p = { .min = 4, .max = 128 }, |
158 | .p1 = { .min = 1, .max = 6 }, |
158 | .p1 = { .min = 1, .max = 6 }, |
159 | .p2 = { .dot_limit = 165000, |
159 | .p2 = { .dot_limit = 165000, |
160 | .p2_slow = 14, .p2_fast = 7 }, |
160 | .p2_slow = 14, .p2_fast = 7 }, |
161 | .find_pll = intel_find_best_PLL, |
161 | .find_pll = intel_find_best_PLL, |
162 | }; |
162 | }; |
163 | 163 | ||
164 | static const intel_limit_t intel_limits_i9xx_sdvo = { |
164 | static const intel_limit_t intel_limits_i9xx_sdvo = { |
165 | .dot = { .min = 20000, .max = 400000 }, |
165 | .dot = { .min = 20000, .max = 400000 }, |
166 | .vco = { .min = 1400000, .max = 2800000 }, |
166 | .vco = { .min = 1400000, .max = 2800000 }, |
167 | .n = { .min = 1, .max = 6 }, |
167 | .n = { .min = 1, .max = 6 }, |
168 | .m = { .min = 70, .max = 120 }, |
168 | .m = { .min = 70, .max = 120 }, |
169 | .m1 = { .min = 8, .max = 18 }, |
169 | .m1 = { .min = 8, .max = 18 }, |
170 | .m2 = { .min = 3, .max = 7 }, |
170 | .m2 = { .min = 3, .max = 7 }, |
171 | .p = { .min = 5, .max = 80 }, |
171 | .p = { .min = 5, .max = 80 }, |
172 | .p1 = { .min = 1, .max = 8 }, |
172 | .p1 = { .min = 1, .max = 8 }, |
173 | .p2 = { .dot_limit = 200000, |
173 | .p2 = { .dot_limit = 200000, |
174 | .p2_slow = 10, .p2_fast = 5 }, |
174 | .p2_slow = 10, .p2_fast = 5 }, |
175 | .find_pll = intel_find_best_PLL, |
175 | .find_pll = intel_find_best_PLL, |
176 | }; |
176 | }; |
177 | 177 | ||
178 | static const intel_limit_t intel_limits_i9xx_lvds = { |
178 | static const intel_limit_t intel_limits_i9xx_lvds = { |
179 | .dot = { .min = 20000, .max = 400000 }, |
179 | .dot = { .min = 20000, .max = 400000 }, |
180 | .vco = { .min = 1400000, .max = 2800000 }, |
180 | .vco = { .min = 1400000, .max = 2800000 }, |
181 | .n = { .min = 1, .max = 6 }, |
181 | .n = { .min = 1, .max = 6 }, |
182 | .m = { .min = 70, .max = 120 }, |
182 | .m = { .min = 70, .max = 120 }, |
183 | .m1 = { .min = 8, .max = 18 }, |
183 | .m1 = { .min = 8, .max = 18 }, |
184 | .m2 = { .min = 3, .max = 7 }, |
184 | .m2 = { .min = 3, .max = 7 }, |
185 | .p = { .min = 7, .max = 98 }, |
185 | .p = { .min = 7, .max = 98 }, |
186 | .p1 = { .min = 1, .max = 8 }, |
186 | .p1 = { .min = 1, .max = 8 }, |
187 | .p2 = { .dot_limit = 112000, |
187 | .p2 = { .dot_limit = 112000, |
188 | .p2_slow = 14, .p2_fast = 7 }, |
188 | .p2_slow = 14, .p2_fast = 7 }, |
189 | .find_pll = intel_find_best_PLL, |
189 | .find_pll = intel_find_best_PLL, |
190 | }; |
190 | }; |
191 | 191 | ||
192 | 192 | ||
193 | static const intel_limit_t intel_limits_g4x_sdvo = { |
193 | static const intel_limit_t intel_limits_g4x_sdvo = { |
194 | .dot = { .min = 25000, .max = 270000 }, |
194 | .dot = { .min = 25000, .max = 270000 }, |
195 | .vco = { .min = 1750000, .max = 3500000}, |
195 | .vco = { .min = 1750000, .max = 3500000}, |
196 | .n = { .min = 1, .max = 4 }, |
196 | .n = { .min = 1, .max = 4 }, |
197 | .m = { .min = 104, .max = 138 }, |
197 | .m = { .min = 104, .max = 138 }, |
198 | .m1 = { .min = 17, .max = 23 }, |
198 | .m1 = { .min = 17, .max = 23 }, |
199 | .m2 = { .min = 5, .max = 11 }, |
199 | .m2 = { .min = 5, .max = 11 }, |
200 | .p = { .min = 10, .max = 30 }, |
200 | .p = { .min = 10, .max = 30 }, |
201 | .p1 = { .min = 1, .max = 3}, |
201 | .p1 = { .min = 1, .max = 3}, |
202 | .p2 = { .dot_limit = 270000, |
202 | .p2 = { .dot_limit = 270000, |
203 | .p2_slow = 10, |
203 | .p2_slow = 10, |
204 | .p2_fast = 10 |
204 | .p2_fast = 10 |
205 | }, |
205 | }, |
206 | .find_pll = intel_g4x_find_best_PLL, |
206 | .find_pll = intel_g4x_find_best_PLL, |
207 | }; |
207 | }; |
208 | 208 | ||
209 | static const intel_limit_t intel_limits_g4x_hdmi = { |
209 | static const intel_limit_t intel_limits_g4x_hdmi = { |
210 | .dot = { .min = 22000, .max = 400000 }, |
210 | .dot = { .min = 22000, .max = 400000 }, |
211 | .vco = { .min = 1750000, .max = 3500000}, |
211 | .vco = { .min = 1750000, .max = 3500000}, |
212 | .n = { .min = 1, .max = 4 }, |
212 | .n = { .min = 1, .max = 4 }, |
213 | .m = { .min = 104, .max = 138 }, |
213 | .m = { .min = 104, .max = 138 }, |
214 | .m1 = { .min = 16, .max = 23 }, |
214 | .m1 = { .min = 16, .max = 23 }, |
215 | .m2 = { .min = 5, .max = 11 }, |
215 | .m2 = { .min = 5, .max = 11 }, |
216 | .p = { .min = 5, .max = 80 }, |
216 | .p = { .min = 5, .max = 80 }, |
217 | .p1 = { .min = 1, .max = 8}, |
217 | .p1 = { .min = 1, .max = 8}, |
218 | .p2 = { .dot_limit = 165000, |
218 | .p2 = { .dot_limit = 165000, |
219 | .p2_slow = 10, .p2_fast = 5 }, |
219 | .p2_slow = 10, .p2_fast = 5 }, |
220 | .find_pll = intel_g4x_find_best_PLL, |
220 | .find_pll = intel_g4x_find_best_PLL, |
221 | }; |
221 | }; |
222 | 222 | ||
223 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { |
223 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { |
224 | .dot = { .min = 20000, .max = 115000 }, |
224 | .dot = { .min = 20000, .max = 115000 }, |
225 | .vco = { .min = 1750000, .max = 3500000 }, |
225 | .vco = { .min = 1750000, .max = 3500000 }, |
226 | .n = { .min = 1, .max = 3 }, |
226 | .n = { .min = 1, .max = 3 }, |
227 | .m = { .min = 104, .max = 138 }, |
227 | .m = { .min = 104, .max = 138 }, |
228 | .m1 = { .min = 17, .max = 23 }, |
228 | .m1 = { .min = 17, .max = 23 }, |
229 | .m2 = { .min = 5, .max = 11 }, |
229 | .m2 = { .min = 5, .max = 11 }, |
230 | .p = { .min = 28, .max = 112 }, |
230 | .p = { .min = 28, .max = 112 }, |
231 | .p1 = { .min = 2, .max = 8 }, |
231 | .p1 = { .min = 2, .max = 8 }, |
232 | .p2 = { .dot_limit = 0, |
232 | .p2 = { .dot_limit = 0, |
233 | .p2_slow = 14, .p2_fast = 14 |
233 | .p2_slow = 14, .p2_fast = 14 |
234 | }, |
234 | }, |
235 | .find_pll = intel_g4x_find_best_PLL, |
235 | .find_pll = intel_g4x_find_best_PLL, |
236 | }; |
236 | }; |
237 | 237 | ||
238 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { |
238 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { |
239 | .dot = { .min = 80000, .max = 224000 }, |
239 | .dot = { .min = 80000, .max = 224000 }, |
240 | .vco = { .min = 1750000, .max = 3500000 }, |
240 | .vco = { .min = 1750000, .max = 3500000 }, |
241 | .n = { .min = 1, .max = 3 }, |
241 | .n = { .min = 1, .max = 3 }, |
242 | .m = { .min = 104, .max = 138 }, |
242 | .m = { .min = 104, .max = 138 }, |
243 | .m1 = { .min = 17, .max = 23 }, |
243 | .m1 = { .min = 17, .max = 23 }, |
244 | .m2 = { .min = 5, .max = 11 }, |
244 | .m2 = { .min = 5, .max = 11 }, |
245 | .p = { .min = 14, .max = 42 }, |
245 | .p = { .min = 14, .max = 42 }, |
246 | .p1 = { .min = 2, .max = 6 }, |
246 | .p1 = { .min = 2, .max = 6 }, |
247 | .p2 = { .dot_limit = 0, |
247 | .p2 = { .dot_limit = 0, |
248 | .p2_slow = 7, .p2_fast = 7 |
248 | .p2_slow = 7, .p2_fast = 7 |
249 | }, |
249 | }, |
250 | .find_pll = intel_g4x_find_best_PLL, |
250 | .find_pll = intel_g4x_find_best_PLL, |
251 | }; |
251 | }; |
252 | 252 | ||
253 | static const intel_limit_t intel_limits_g4x_display_port = { |
253 | static const intel_limit_t intel_limits_g4x_display_port = { |
254 | .dot = { .min = 161670, .max = 227000 }, |
254 | .dot = { .min = 161670, .max = 227000 }, |
255 | .vco = { .min = 1750000, .max = 3500000}, |
255 | .vco = { .min = 1750000, .max = 3500000}, |
256 | .n = { .min = 1, .max = 2 }, |
256 | .n = { .min = 1, .max = 2 }, |
257 | .m = { .min = 97, .max = 108 }, |
257 | .m = { .min = 97, .max = 108 }, |
258 | .m1 = { .min = 0x10, .max = 0x12 }, |
258 | .m1 = { .min = 0x10, .max = 0x12 }, |
259 | .m2 = { .min = 0x05, .max = 0x06 }, |
259 | .m2 = { .min = 0x05, .max = 0x06 }, |
260 | .p = { .min = 10, .max = 20 }, |
260 | .p = { .min = 10, .max = 20 }, |
261 | .p1 = { .min = 1, .max = 2}, |
261 | .p1 = { .min = 1, .max = 2}, |
262 | .p2 = { .dot_limit = 0, |
262 | .p2 = { .dot_limit = 0, |
263 | .p2_slow = 10, .p2_fast = 10 }, |
263 | .p2_slow = 10, .p2_fast = 10 }, |
264 | .find_pll = intel_find_pll_g4x_dp, |
264 | .find_pll = intel_find_pll_g4x_dp, |
265 | }; |
265 | }; |
266 | 266 | ||
267 | static const intel_limit_t intel_limits_pineview_sdvo = { |
267 | static const intel_limit_t intel_limits_pineview_sdvo = { |
268 | .dot = { .min = 20000, .max = 400000}, |
268 | .dot = { .min = 20000, .max = 400000}, |
269 | .vco = { .min = 1700000, .max = 3500000 }, |
269 | .vco = { .min = 1700000, .max = 3500000 }, |
270 | /* Pineview's Ncounter is a ring counter */ |
270 | /* Pineview's Ncounter is a ring counter */ |
271 | .n = { .min = 3, .max = 6 }, |
271 | .n = { .min = 3, .max = 6 }, |
272 | .m = { .min = 2, .max = 256 }, |
272 | .m = { .min = 2, .max = 256 }, |
273 | /* Pineview only has one combined m divider, which we treat as m2. */ |
273 | /* Pineview only has one combined m divider, which we treat as m2. */ |
274 | .m1 = { .min = 0, .max = 0 }, |
274 | .m1 = { .min = 0, .max = 0 }, |
275 | .m2 = { .min = 0, .max = 254 }, |
275 | .m2 = { .min = 0, .max = 254 }, |
276 | .p = { .min = 5, .max = 80 }, |
276 | .p = { .min = 5, .max = 80 }, |
277 | .p1 = { .min = 1, .max = 8 }, |
277 | .p1 = { .min = 1, .max = 8 }, |
278 | .p2 = { .dot_limit = 200000, |
278 | .p2 = { .dot_limit = 200000, |
279 | .p2_slow = 10, .p2_fast = 5 }, |
279 | .p2_slow = 10, .p2_fast = 5 }, |
280 | .find_pll = intel_find_best_PLL, |
280 | .find_pll = intel_find_best_PLL, |
281 | }; |
281 | }; |
282 | 282 | ||
283 | static const intel_limit_t intel_limits_pineview_lvds = { |
283 | static const intel_limit_t intel_limits_pineview_lvds = { |
284 | .dot = { .min = 20000, .max = 400000 }, |
284 | .dot = { .min = 20000, .max = 400000 }, |
285 | .vco = { .min = 1700000, .max = 3500000 }, |
285 | .vco = { .min = 1700000, .max = 3500000 }, |
286 | .n = { .min = 3, .max = 6 }, |
286 | .n = { .min = 3, .max = 6 }, |
287 | .m = { .min = 2, .max = 256 }, |
287 | .m = { .min = 2, .max = 256 }, |
288 | .m1 = { .min = 0, .max = 0 }, |
288 | .m1 = { .min = 0, .max = 0 }, |
289 | .m2 = { .min = 0, .max = 254 }, |
289 | .m2 = { .min = 0, .max = 254 }, |
290 | .p = { .min = 7, .max = 112 }, |
290 | .p = { .min = 7, .max = 112 }, |
291 | .p1 = { .min = 1, .max = 8 }, |
291 | .p1 = { .min = 1, .max = 8 }, |
292 | .p2 = { .dot_limit = 112000, |
292 | .p2 = { .dot_limit = 112000, |
293 | .p2_slow = 14, .p2_fast = 14 }, |
293 | .p2_slow = 14, .p2_fast = 14 }, |
294 | .find_pll = intel_find_best_PLL, |
294 | .find_pll = intel_find_best_PLL, |
295 | }; |
295 | }; |
296 | 296 | ||
297 | /* Ironlake / Sandybridge |
297 | /* Ironlake / Sandybridge |
298 | * |
298 | * |
299 | * We calculate clock using (register_value + 2) for N/M1/M2, so here |
299 | * We calculate clock using (register_value + 2) for N/M1/M2, so here |
300 | * the range value for them is (actual_value - 2). |
300 | * the range value for them is (actual_value - 2). |
301 | */ |
301 | */ |
302 | static const intel_limit_t intel_limits_ironlake_dac = { |
302 | static const intel_limit_t intel_limits_ironlake_dac = { |
303 | .dot = { .min = 25000, .max = 350000 }, |
303 | .dot = { .min = 25000, .max = 350000 }, |
304 | .vco = { .min = 1760000, .max = 3510000 }, |
304 | .vco = { .min = 1760000, .max = 3510000 }, |
305 | .n = { .min = 1, .max = 5 }, |
305 | .n = { .min = 1, .max = 5 }, |
306 | .m = { .min = 79, .max = 127 }, |
306 | .m = { .min = 79, .max = 127 }, |
307 | .m1 = { .min = 12, .max = 22 }, |
307 | .m1 = { .min = 12, .max = 22 }, |
308 | .m2 = { .min = 5, .max = 9 }, |
308 | .m2 = { .min = 5, .max = 9 }, |
309 | .p = { .min = 5, .max = 80 }, |
309 | .p = { .min = 5, .max = 80 }, |
310 | .p1 = { .min = 1, .max = 8 }, |
310 | .p1 = { .min = 1, .max = 8 }, |
311 | .p2 = { .dot_limit = 225000, |
311 | .p2 = { .dot_limit = 225000, |
312 | .p2_slow = 10, .p2_fast = 5 }, |
312 | .p2_slow = 10, .p2_fast = 5 }, |
313 | .find_pll = intel_g4x_find_best_PLL, |
313 | .find_pll = intel_g4x_find_best_PLL, |
314 | }; |
314 | }; |
315 | 315 | ||
316 | static const intel_limit_t intel_limits_ironlake_single_lvds = { |
316 | static const intel_limit_t intel_limits_ironlake_single_lvds = { |
317 | .dot = { .min = 25000, .max = 350000 }, |
317 | .dot = { .min = 25000, .max = 350000 }, |
318 | .vco = { .min = 1760000, .max = 3510000 }, |
318 | .vco = { .min = 1760000, .max = 3510000 }, |
319 | .n = { .min = 1, .max = 3 }, |
319 | .n = { .min = 1, .max = 3 }, |
320 | .m = { .min = 79, .max = 118 }, |
320 | .m = { .min = 79, .max = 118 }, |
321 | .m1 = { .min = 12, .max = 22 }, |
321 | .m1 = { .min = 12, .max = 22 }, |
322 | .m2 = { .min = 5, .max = 9 }, |
322 | .m2 = { .min = 5, .max = 9 }, |
323 | .p = { .min = 28, .max = 112 }, |
323 | .p = { .min = 28, .max = 112 }, |
324 | .p1 = { .min = 2, .max = 8 }, |
324 | .p1 = { .min = 2, .max = 8 }, |
325 | .p2 = { .dot_limit = 225000, |
325 | .p2 = { .dot_limit = 225000, |
326 | .p2_slow = 14, .p2_fast = 14 }, |
326 | .p2_slow = 14, .p2_fast = 14 }, |
327 | .find_pll = intel_g4x_find_best_PLL, |
327 | .find_pll = intel_g4x_find_best_PLL, |
328 | }; |
328 | }; |
329 | 329 | ||
330 | static const intel_limit_t intel_limits_ironlake_dual_lvds = { |
330 | static const intel_limit_t intel_limits_ironlake_dual_lvds = { |
331 | .dot = { .min = 25000, .max = 350000 }, |
331 | .dot = { .min = 25000, .max = 350000 }, |
332 | .vco = { .min = 1760000, .max = 3510000 }, |
332 | .vco = { .min = 1760000, .max = 3510000 }, |
333 | .n = { .min = 1, .max = 3 }, |
333 | .n = { .min = 1, .max = 3 }, |
334 | .m = { .min = 79, .max = 127 }, |
334 | .m = { .min = 79, .max = 127 }, |
335 | .m1 = { .min = 12, .max = 22 }, |
335 | .m1 = { .min = 12, .max = 22 }, |
336 | .m2 = { .min = 5, .max = 9 }, |
336 | .m2 = { .min = 5, .max = 9 }, |
337 | .p = { .min = 14, .max = 56 }, |
337 | .p = { .min = 14, .max = 56 }, |
338 | .p1 = { .min = 2, .max = 8 }, |
338 | .p1 = { .min = 2, .max = 8 }, |
339 | .p2 = { .dot_limit = 225000, |
339 | .p2 = { .dot_limit = 225000, |
340 | .p2_slow = 7, .p2_fast = 7 }, |
340 | .p2_slow = 7, .p2_fast = 7 }, |
341 | .find_pll = intel_g4x_find_best_PLL, |
341 | .find_pll = intel_g4x_find_best_PLL, |
342 | }; |
342 | }; |
343 | 343 | ||
344 | /* LVDS 100mhz refclk limits. */ |
344 | /* LVDS 100mhz refclk limits. */ |
345 | static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { |
345 | static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { |
346 | .dot = { .min = 25000, .max = 350000 }, |
346 | .dot = { .min = 25000, .max = 350000 }, |
347 | .vco = { .min = 1760000, .max = 3510000 }, |
347 | .vco = { .min = 1760000, .max = 3510000 }, |
348 | .n = { .min = 1, .max = 2 }, |
348 | .n = { .min = 1, .max = 2 }, |
349 | .m = { .min = 79, .max = 126 }, |
349 | .m = { .min = 79, .max = 126 }, |
350 | .m1 = { .min = 12, .max = 22 }, |
350 | .m1 = { .min = 12, .max = 22 }, |
351 | .m2 = { .min = 5, .max = 9 }, |
351 | .m2 = { .min = 5, .max = 9 }, |
352 | .p = { .min = 28, .max = 112 }, |
352 | .p = { .min = 28, .max = 112 }, |
353 | .p1 = { .min = 2, .max = 8 }, |
353 | .p1 = { .min = 2, .max = 8 }, |
354 | .p2 = { .dot_limit = 225000, |
354 | .p2 = { .dot_limit = 225000, |
355 | .p2_slow = 14, .p2_fast = 14 }, |
355 | .p2_slow = 14, .p2_fast = 14 }, |
356 | .find_pll = intel_g4x_find_best_PLL, |
356 | .find_pll = intel_g4x_find_best_PLL, |
357 | }; |
357 | }; |
358 | 358 | ||
359 | static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { |
359 | static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { |
360 | .dot = { .min = 25000, .max = 350000 }, |
360 | .dot = { .min = 25000, .max = 350000 }, |
361 | .vco = { .min = 1760000, .max = 3510000 }, |
361 | .vco = { .min = 1760000, .max = 3510000 }, |
362 | .n = { .min = 1, .max = 3 }, |
362 | .n = { .min = 1, .max = 3 }, |
363 | .m = { .min = 79, .max = 126 }, |
363 | .m = { .min = 79, .max = 126 }, |
364 | .m1 = { .min = 12, .max = 22 }, |
364 | .m1 = { .min = 12, .max = 22 }, |
365 | .m2 = { .min = 5, .max = 9 }, |
365 | .m2 = { .min = 5, .max = 9 }, |
366 | .p = { .min = 14, .max = 42 }, |
366 | .p = { .min = 14, .max = 42 }, |
367 | .p1 = { .min = 2, .max = 6 }, |
367 | .p1 = { .min = 2, .max = 6 }, |
368 | .p2 = { .dot_limit = 225000, |
368 | .p2 = { .dot_limit = 225000, |
369 | .p2_slow = 7, .p2_fast = 7 }, |
369 | .p2_slow = 7, .p2_fast = 7 }, |
370 | .find_pll = intel_g4x_find_best_PLL, |
370 | .find_pll = intel_g4x_find_best_PLL, |
371 | }; |
371 | }; |
372 | 372 | ||
373 | static const intel_limit_t intel_limits_ironlake_display_port = { |
373 | static const intel_limit_t intel_limits_ironlake_display_port = { |
374 | .dot = { .min = 25000, .max = 350000 }, |
374 | .dot = { .min = 25000, .max = 350000 }, |
375 | .vco = { .min = 1760000, .max = 3510000}, |
375 | .vco = { .min = 1760000, .max = 3510000}, |
376 | .n = { .min = 1, .max = 2 }, |
376 | .n = { .min = 1, .max = 2 }, |
377 | .m = { .min = 81, .max = 90 }, |
377 | .m = { .min = 81, .max = 90 }, |
378 | .m1 = { .min = 12, .max = 22 }, |
378 | .m1 = { .min = 12, .max = 22 }, |
379 | .m2 = { .min = 5, .max = 9 }, |
379 | .m2 = { .min = 5, .max = 9 }, |
380 | .p = { .min = 10, .max = 20 }, |
380 | .p = { .min = 10, .max = 20 }, |
381 | .p1 = { .min = 1, .max = 2}, |
381 | .p1 = { .min = 1, .max = 2}, |
382 | .p2 = { .dot_limit = 0, |
382 | .p2 = { .dot_limit = 0, |
383 | .p2_slow = 10, .p2_fast = 10 }, |
383 | .p2_slow = 10, .p2_fast = 10 }, |
384 | .find_pll = intel_find_pll_ironlake_dp, |
384 | .find_pll = intel_find_pll_ironlake_dp, |
385 | }; |
385 | }; |
386 | 386 | ||
387 | static const intel_limit_t intel_limits_vlv_dac = { |
387 | static const intel_limit_t intel_limits_vlv_dac = { |
388 | .dot = { .min = 25000, .max = 270000 }, |
388 | .dot = { .min = 25000, .max = 270000 }, |
389 | .vco = { .min = 4000000, .max = 6000000 }, |
389 | .vco = { .min = 4000000, .max = 6000000 }, |
390 | .n = { .min = 1, .max = 7 }, |
390 | .n = { .min = 1, .max = 7 }, |
391 | .m = { .min = 22, .max = 450 }, /* guess */ |
391 | .m = { .min = 22, .max = 450 }, /* guess */ |
392 | .m1 = { .min = 2, .max = 3 }, |
392 | .m1 = { .min = 2, .max = 3 }, |
393 | .m2 = { .min = 11, .max = 156 }, |
393 | .m2 = { .min = 11, .max = 156 }, |
394 | .p = { .min = 10, .max = 30 }, |
394 | .p = { .min = 10, .max = 30 }, |
395 | .p1 = { .min = 2, .max = 3 }, |
395 | .p1 = { .min = 2, .max = 3 }, |
396 | .p2 = { .dot_limit = 270000, |
396 | .p2 = { .dot_limit = 270000, |
397 | .p2_slow = 2, .p2_fast = 20 }, |
397 | .p2_slow = 2, .p2_fast = 20 }, |
398 | .find_pll = intel_vlv_find_best_pll, |
398 | .find_pll = intel_vlv_find_best_pll, |
399 | }; |
399 | }; |
400 | 400 | ||
401 | static const intel_limit_t intel_limits_vlv_hdmi = { |
401 | static const intel_limit_t intel_limits_vlv_hdmi = { |
402 | .dot = { .min = 20000, .max = 165000 }, |
402 | .dot = { .min = 20000, .max = 165000 }, |
403 | .vco = { .min = 4000000, .max = 5994000}, |
403 | .vco = { .min = 4000000, .max = 5994000}, |
404 | .n = { .min = 1, .max = 7 }, |
404 | .n = { .min = 1, .max = 7 }, |
405 | .m = { .min = 60, .max = 300 }, /* guess */ |
405 | .m = { .min = 60, .max = 300 }, /* guess */ |
406 | .m1 = { .min = 2, .max = 3 }, |
406 | .m1 = { .min = 2, .max = 3 }, |
407 | .m2 = { .min = 11, .max = 156 }, |
407 | .m2 = { .min = 11, .max = 156 }, |
408 | .p = { .min = 10, .max = 30 }, |
408 | .p = { .min = 10, .max = 30 }, |
409 | .p1 = { .min = 2, .max = 3 }, |
409 | .p1 = { .min = 2, .max = 3 }, |
410 | .p2 = { .dot_limit = 270000, |
410 | .p2 = { .dot_limit = 270000, |
411 | .p2_slow = 2, .p2_fast = 20 }, |
411 | .p2_slow = 2, .p2_fast = 20 }, |
412 | .find_pll = intel_vlv_find_best_pll, |
412 | .find_pll = intel_vlv_find_best_pll, |
413 | }; |
413 | }; |
414 | 414 | ||
415 | static const intel_limit_t intel_limits_vlv_dp = { |
415 | static const intel_limit_t intel_limits_vlv_dp = { |
416 | .dot = { .min = 25000, .max = 270000 }, |
416 | .dot = { .min = 25000, .max = 270000 }, |
417 | .vco = { .min = 4000000, .max = 6000000 }, |
417 | .vco = { .min = 4000000, .max = 6000000 }, |
418 | .n = { .min = 1, .max = 7 }, |
418 | .n = { .min = 1, .max = 7 }, |
419 | .m = { .min = 22, .max = 450 }, |
419 | .m = { .min = 22, .max = 450 }, |
420 | .m1 = { .min = 2, .max = 3 }, |
420 | .m1 = { .min = 2, .max = 3 }, |
421 | .m2 = { .min = 11, .max = 156 }, |
421 | .m2 = { .min = 11, .max = 156 }, |
422 | .p = { .min = 10, .max = 30 }, |
422 | .p = { .min = 10, .max = 30 }, |
423 | .p1 = { .min = 2, .max = 3 }, |
423 | .p1 = { .min = 2, .max = 3 }, |
424 | .p2 = { .dot_limit = 270000, |
424 | .p2 = { .dot_limit = 270000, |
425 | .p2_slow = 2, .p2_fast = 20 }, |
425 | .p2_slow = 2, .p2_fast = 20 }, |
426 | .find_pll = intel_vlv_find_best_pll, |
426 | .find_pll = intel_vlv_find_best_pll, |
427 | }; |
427 | }; |
428 | 428 | ||
429 | u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) |
429 | u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) |
430 | { |
430 | { |
431 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
431 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
432 | 432 | ||
433 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
433 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
434 | DRM_ERROR("DPIO idle wait timed out\n"); |
434 | DRM_ERROR("DPIO idle wait timed out\n"); |
435 | return 0; |
435 | return 0; |
436 | } |
436 | } |
437 | 437 | ||
438 | I915_WRITE(DPIO_REG, reg); |
438 | I915_WRITE(DPIO_REG, reg); |
439 | I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID | |
439 | I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID | |
440 | DPIO_BYTE); |
440 | DPIO_BYTE); |
441 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
441 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
442 | DRM_ERROR("DPIO read wait timed out\n"); |
442 | DRM_ERROR("DPIO read wait timed out\n"); |
443 | return 0; |
443 | return 0; |
444 | } |
444 | } |
445 | 445 | ||
446 | return I915_READ(DPIO_DATA); |
446 | return I915_READ(DPIO_DATA); |
447 | } |
447 | } |
448 | 448 | ||
449 | static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, |
449 | static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, |
450 | u32 val) |
450 | u32 val) |
451 | { |
451 | { |
452 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
452 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
453 | 453 | ||
454 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
454 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
455 | DRM_ERROR("DPIO idle wait timed out\n"); |
455 | DRM_ERROR("DPIO idle wait timed out\n"); |
456 | return; |
456 | return; |
457 | } |
457 | } |
458 | 458 | ||
459 | I915_WRITE(DPIO_DATA, val); |
459 | I915_WRITE(DPIO_DATA, val); |
460 | I915_WRITE(DPIO_REG, reg); |
460 | I915_WRITE(DPIO_REG, reg); |
461 | I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | |
461 | I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | |
462 | DPIO_BYTE); |
462 | DPIO_BYTE); |
463 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) |
463 | if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) |
464 | DRM_ERROR("DPIO write wait timed out\n"); |
464 | DRM_ERROR("DPIO write wait timed out\n"); |
465 | } |
465 | } |
466 | 466 | ||
467 | static void vlv_init_dpio(struct drm_device *dev) |
467 | static void vlv_init_dpio(struct drm_device *dev) |
468 | { |
468 | { |
469 | struct drm_i915_private *dev_priv = dev->dev_private; |
469 | struct drm_i915_private *dev_priv = dev->dev_private; |
470 | 470 | ||
471 | /* Reset the DPIO config */ |
471 | /* Reset the DPIO config */ |
472 | I915_WRITE(DPIO_CTL, 0); |
472 | I915_WRITE(DPIO_CTL, 0); |
473 | POSTING_READ(DPIO_CTL); |
473 | POSTING_READ(DPIO_CTL); |
474 | I915_WRITE(DPIO_CTL, 1); |
474 | I915_WRITE(DPIO_CTL, 1); |
475 | POSTING_READ(DPIO_CTL); |
475 | POSTING_READ(DPIO_CTL); |
476 | } |
476 | } |
477 | 477 | ||
478 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, |
478 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, |
479 | int refclk) |
479 | int refclk) |
480 | { |
480 | { |
481 | struct drm_device *dev = crtc->dev; |
481 | struct drm_device *dev = crtc->dev; |
482 | const intel_limit_t *limit; |
482 | const intel_limit_t *limit; |
483 | 483 | ||
484 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
484 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
485 | if (intel_is_dual_link_lvds(dev)) { |
485 | if (intel_is_dual_link_lvds(dev)) { |
486 | /* LVDS dual channel */ |
486 | /* LVDS dual channel */ |
487 | if (refclk == 100000) |
487 | if (refclk == 100000) |
488 | limit = &intel_limits_ironlake_dual_lvds_100m; |
488 | limit = &intel_limits_ironlake_dual_lvds_100m; |
489 | else |
489 | else |
490 | limit = &intel_limits_ironlake_dual_lvds; |
490 | limit = &intel_limits_ironlake_dual_lvds; |
491 | } else { |
491 | } else { |
492 | if (refclk == 100000) |
492 | if (refclk == 100000) |
493 | limit = &intel_limits_ironlake_single_lvds_100m; |
493 | limit = &intel_limits_ironlake_single_lvds_100m; |
494 | else |
494 | else |
495 | limit = &intel_limits_ironlake_single_lvds; |
495 | limit = &intel_limits_ironlake_single_lvds; |
496 | } |
496 | } |
497 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || |
497 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || |
498 | intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) |
498 | intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) |
499 | limit = &intel_limits_ironlake_display_port; |
499 | limit = &intel_limits_ironlake_display_port; |
500 | else |
500 | else |
501 | limit = &intel_limits_ironlake_dac; |
501 | limit = &intel_limits_ironlake_dac; |
502 | 502 | ||
503 | return limit; |
503 | return limit; |
504 | } |
504 | } |
505 | 505 | ||
506 | static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) |
506 | static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) |
507 | { |
507 | { |
508 | struct drm_device *dev = crtc->dev; |
508 | struct drm_device *dev = crtc->dev; |
509 | const intel_limit_t *limit; |
509 | const intel_limit_t *limit; |
510 | 510 | ||
511 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
511 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
512 | if (intel_is_dual_link_lvds(dev)) |
512 | if (intel_is_dual_link_lvds(dev)) |
513 | /* LVDS with dual channel */ |
513 | /* LVDS with dual channel */ |
514 | limit = &intel_limits_g4x_dual_channel_lvds; |
514 | limit = &intel_limits_g4x_dual_channel_lvds; |
515 | else |
515 | else |
516 | /* LVDS with dual channel */ |
516 | /* LVDS with dual channel */ |
517 | limit = &intel_limits_g4x_single_channel_lvds; |
517 | limit = &intel_limits_g4x_single_channel_lvds; |
518 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || |
518 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || |
519 | intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { |
519 | intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { |
520 | limit = &intel_limits_g4x_hdmi; |
520 | limit = &intel_limits_g4x_hdmi; |
521 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { |
521 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { |
522 | limit = &intel_limits_g4x_sdvo; |
522 | limit = &intel_limits_g4x_sdvo; |
523 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
523 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
524 | limit = &intel_limits_g4x_display_port; |
524 | limit = &intel_limits_g4x_display_port; |
525 | } else /* The option is for other outputs */ |
525 | } else /* The option is for other outputs */ |
526 | limit = &intel_limits_i9xx_sdvo; |
526 | limit = &intel_limits_i9xx_sdvo; |
527 | 527 | ||
528 | return limit; |
528 | return limit; |
529 | } |
529 | } |
530 | 530 | ||
531 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) |
531 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) |
532 | { |
532 | { |
533 | struct drm_device *dev = crtc->dev; |
533 | struct drm_device *dev = crtc->dev; |
534 | const intel_limit_t *limit; |
534 | const intel_limit_t *limit; |
535 | 535 | ||
536 | if (HAS_PCH_SPLIT(dev)) |
536 | if (HAS_PCH_SPLIT(dev)) |
537 | limit = intel_ironlake_limit(crtc, refclk); |
537 | limit = intel_ironlake_limit(crtc, refclk); |
538 | else if (IS_G4X(dev)) { |
538 | else if (IS_G4X(dev)) { |
539 | limit = intel_g4x_limit(crtc); |
539 | limit = intel_g4x_limit(crtc); |
540 | } else if (IS_PINEVIEW(dev)) { |
540 | } else if (IS_PINEVIEW(dev)) { |
541 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
541 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
542 | limit = &intel_limits_pineview_lvds; |
542 | limit = &intel_limits_pineview_lvds; |
543 | else |
543 | else |
544 | limit = &intel_limits_pineview_sdvo; |
544 | limit = &intel_limits_pineview_sdvo; |
545 | } else if (IS_VALLEYVIEW(dev)) { |
545 | } else if (IS_VALLEYVIEW(dev)) { |
546 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) |
546 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) |
547 | limit = &intel_limits_vlv_dac; |
547 | limit = &intel_limits_vlv_dac; |
548 | else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) |
548 | else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) |
549 | limit = &intel_limits_vlv_hdmi; |
549 | limit = &intel_limits_vlv_hdmi; |
550 | else |
550 | else |
551 | limit = &intel_limits_vlv_dp; |
551 | limit = &intel_limits_vlv_dp; |
552 | } else if (!IS_GEN2(dev)) { |
552 | } else if (!IS_GEN2(dev)) { |
553 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
553 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
554 | limit = &intel_limits_i9xx_lvds; |
554 | limit = &intel_limits_i9xx_lvds; |
555 | else |
555 | else |
556 | limit = &intel_limits_i9xx_sdvo; |
556 | limit = &intel_limits_i9xx_sdvo; |
557 | } else { |
557 | } else { |
558 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
558 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
559 | limit = &intel_limits_i8xx_lvds; |
559 | limit = &intel_limits_i8xx_lvds; |
560 | else |
560 | else |
561 | limit = &intel_limits_i8xx_dvo; |
561 | limit = &intel_limits_i8xx_dvo; |
562 | } |
562 | } |
563 | return limit; |
563 | return limit; |
564 | } |
564 | } |
565 | 565 | ||
566 | /* m1 is reserved as 0 in Pineview, n is a ring counter */ |
566 | /* m1 is reserved as 0 in Pineview, n is a ring counter */ |
567 | static void pineview_clock(int refclk, intel_clock_t *clock) |
567 | static void pineview_clock(int refclk, intel_clock_t *clock) |
568 | { |
568 | { |
569 | clock->m = clock->m2 + 2; |
569 | clock->m = clock->m2 + 2; |
570 | clock->p = clock->p1 * clock->p2; |
570 | clock->p = clock->p1 * clock->p2; |
571 | clock->vco = refclk * clock->m / clock->n; |
571 | clock->vco = refclk * clock->m / clock->n; |
572 | clock->dot = clock->vco / clock->p; |
572 | clock->dot = clock->vco / clock->p; |
573 | } |
573 | } |
574 | 574 | ||
575 | static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) |
575 | static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) |
576 | { |
576 | { |
577 | if (IS_PINEVIEW(dev)) { |
577 | if (IS_PINEVIEW(dev)) { |
578 | pineview_clock(refclk, clock); |
578 | pineview_clock(refclk, clock); |
579 | return; |
579 | return; |
580 | } |
580 | } |
581 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); |
581 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); |
582 | clock->p = clock->p1 * clock->p2; |
582 | clock->p = clock->p1 * clock->p2; |
583 | clock->vco = refclk * clock->m / (clock->n + 2); |
583 | clock->vco = refclk * clock->m / (clock->n + 2); |
584 | clock->dot = clock->vco / clock->p; |
584 | clock->dot = clock->vco / clock->p; |
585 | } |
585 | } |
586 | 586 | ||
587 | /** |
587 | /** |
588 | * Returns whether any output on the specified pipe is of the specified type |
588 | * Returns whether any output on the specified pipe is of the specified type |
589 | */ |
589 | */ |
590 | bool intel_pipe_has_type(struct drm_crtc *crtc, int type) |
590 | bool intel_pipe_has_type(struct drm_crtc *crtc, int type) |
591 | { |
591 | { |
592 | struct drm_device *dev = crtc->dev; |
592 | struct drm_device *dev = crtc->dev; |
593 | struct intel_encoder *encoder; |
593 | struct intel_encoder *encoder; |
594 | 594 | ||
595 | for_each_encoder_on_crtc(dev, crtc, encoder) |
595 | for_each_encoder_on_crtc(dev, crtc, encoder) |
596 | if (encoder->type == type) |
596 | if (encoder->type == type) |
597 | return true; |
597 | return true; |
598 | 598 | ||
599 | return false; |
599 | return false; |
600 | } |
600 | } |
601 | 601 | ||
602 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) |
602 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) |
603 | /** |
603 | /** |
604 | * Returns whether the given set of divisors are valid for a given refclk with |
604 | * Returns whether the given set of divisors are valid for a given refclk with |
605 | * the given connectors. |
605 | * the given connectors. |
606 | */ |
606 | */ |
607 | 607 | ||
608 | static bool intel_PLL_is_valid(struct drm_device *dev, |
608 | static bool intel_PLL_is_valid(struct drm_device *dev, |
609 | const intel_limit_t *limit, |
609 | const intel_limit_t *limit, |
610 | const intel_clock_t *clock) |
610 | const intel_clock_t *clock) |
611 | { |
611 | { |
612 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) |
612 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) |
613 | INTELPllInvalid("p1 out of range\n"); |
613 | INTELPllInvalid("p1 out of range\n"); |
614 | if (clock->p < limit->p.min || limit->p.max < clock->p) |
614 | if (clock->p < limit->p.min || limit->p.max < clock->p) |
615 | INTELPllInvalid("p out of range\n"); |
615 | INTELPllInvalid("p out of range\n"); |
616 | if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) |
616 | if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) |
617 | INTELPllInvalid("m2 out of range\n"); |
617 | INTELPllInvalid("m2 out of range\n"); |
618 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) |
618 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) |
619 | INTELPllInvalid("m1 out of range\n"); |
619 | INTELPllInvalid("m1 out of range\n"); |
620 | if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) |
620 | if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) |
621 | INTELPllInvalid("m1 <= m2\n"); |
621 | INTELPllInvalid("m1 <= m2\n"); |
622 | if (clock->m < limit->m.min || limit->m.max < clock->m) |
622 | if (clock->m < limit->m.min || limit->m.max < clock->m) |
623 | INTELPllInvalid("m out of range\n"); |
623 | INTELPllInvalid("m out of range\n"); |
624 | if (clock->n < limit->n.min || limit->n.max < clock->n) |
624 | if (clock->n < limit->n.min || limit->n.max < clock->n) |
625 | INTELPllInvalid("n out of range\n"); |
625 | INTELPllInvalid("n out of range\n"); |
626 | if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) |
626 | if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) |
627 | INTELPllInvalid("vco out of range\n"); |
627 | INTELPllInvalid("vco out of range\n"); |
628 | /* XXX: We may need to be checking "Dot clock" depending on the multiplier, |
628 | /* XXX: We may need to be checking "Dot clock" depending on the multiplier, |
629 | * connector, etc., rather than just a single range. |
629 | * connector, etc., rather than just a single range. |
630 | */ |
630 | */ |
631 | if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) |
631 | if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) |
632 | INTELPllInvalid("dot out of range\n"); |
632 | INTELPllInvalid("dot out of range\n"); |
633 | 633 | ||
634 | return true; |
634 | return true; |
635 | } |
635 | } |
636 | 636 | ||
637 | static bool |
637 | static bool |
638 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
638 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
639 | int target, int refclk, intel_clock_t *match_clock, |
639 | int target, int refclk, intel_clock_t *match_clock, |
640 | intel_clock_t *best_clock) |
640 | intel_clock_t *best_clock) |
641 | 641 | ||
642 | { |
642 | { |
643 | struct drm_device *dev = crtc->dev; |
643 | struct drm_device *dev = crtc->dev; |
644 | intel_clock_t clock; |
644 | intel_clock_t clock; |
645 | int err = target; |
645 | int err = target; |
646 | 646 | ||
647 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
647 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
648 | /* |
648 | /* |
649 | * For LVDS just rely on its current settings for dual-channel. |
649 | * For LVDS just rely on its current settings for dual-channel. |
650 | * We haven't figured out how to reliably set up different |
650 | * We haven't figured out how to reliably set up different |
651 | * single/dual channel state, if we even can. |
651 | * single/dual channel state, if we even can. |
652 | */ |
652 | */ |
653 | if (intel_is_dual_link_lvds(dev)) |
653 | if (intel_is_dual_link_lvds(dev)) |
654 | clock.p2 = limit->p2.p2_fast; |
654 | clock.p2 = limit->p2.p2_fast; |
655 | else |
655 | else |
656 | clock.p2 = limit->p2.p2_slow; |
656 | clock.p2 = limit->p2.p2_slow; |
657 | } else { |
657 | } else { |
658 | if (target < limit->p2.dot_limit) |
658 | if (target < limit->p2.dot_limit) |
659 | clock.p2 = limit->p2.p2_slow; |
659 | clock.p2 = limit->p2.p2_slow; |
660 | else |
660 | else |
661 | clock.p2 = limit->p2.p2_fast; |
661 | clock.p2 = limit->p2.p2_fast; |
662 | } |
662 | } |
663 | 663 | ||
664 | memset(best_clock, 0, sizeof(*best_clock)); |
664 | memset(best_clock, 0, sizeof(*best_clock)); |
665 | 665 | ||
666 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; |
666 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; |
667 | clock.m1++) { |
667 | clock.m1++) { |
668 | for (clock.m2 = limit->m2.min; |
668 | for (clock.m2 = limit->m2.min; |
669 | clock.m2 <= limit->m2.max; clock.m2++) { |
669 | clock.m2 <= limit->m2.max; clock.m2++) { |
670 | /* m1 is always 0 in Pineview */ |
670 | /* m1 is always 0 in Pineview */ |
671 | if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) |
671 | if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) |
672 | break; |
672 | break; |
673 | for (clock.n = limit->n.min; |
673 | for (clock.n = limit->n.min; |
674 | clock.n <= limit->n.max; clock.n++) { |
674 | clock.n <= limit->n.max; clock.n++) { |
675 | for (clock.p1 = limit->p1.min; |
675 | for (clock.p1 = limit->p1.min; |
676 | clock.p1 <= limit->p1.max; clock.p1++) { |
676 | clock.p1 <= limit->p1.max; clock.p1++) { |
677 | int this_err; |
677 | int this_err; |
678 | 678 | ||
679 | intel_clock(dev, refclk, &clock); |
679 | intel_clock(dev, refclk, &clock); |
680 | if (!intel_PLL_is_valid(dev, limit, |
680 | if (!intel_PLL_is_valid(dev, limit, |
681 | &clock)) |
681 | &clock)) |
682 | continue; |
682 | continue; |
683 | if (match_clock && |
683 | if (match_clock && |
684 | clock.p != match_clock->p) |
684 | clock.p != match_clock->p) |
685 | continue; |
685 | continue; |
686 | 686 | ||
687 | this_err = abs(clock.dot - target); |
687 | this_err = abs(clock.dot - target); |
688 | if (this_err < err) { |
688 | if (this_err < err) { |
689 | *best_clock = clock; |
689 | *best_clock = clock; |
690 | err = this_err; |
690 | err = this_err; |
691 | } |
691 | } |
692 | } |
692 | } |
693 | } |
693 | } |
694 | } |
694 | } |
695 | } |
695 | } |
696 | 696 | ||
697 | return (err != target); |
697 | return (err != target); |
698 | } |
698 | } |
699 | 699 | ||
700 | static bool |
700 | static bool |
701 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
701 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
702 | int target, int refclk, intel_clock_t *match_clock, |
702 | int target, int refclk, intel_clock_t *match_clock, |
703 | intel_clock_t *best_clock) |
703 | intel_clock_t *best_clock) |
704 | { |
704 | { |
705 | struct drm_device *dev = crtc->dev; |
705 | struct drm_device *dev = crtc->dev; |
706 | intel_clock_t clock; |
706 | intel_clock_t clock; |
707 | int max_n; |
707 | int max_n; |
708 | bool found; |
708 | bool found; |
709 | /* approximately equals target * 0.00585 */ |
709 | /* approximately equals target * 0.00585 */ |
710 | int err_most = (target >> 8) + (target >> 9); |
710 | int err_most = (target >> 8) + (target >> 9); |
711 | found = false; |
711 | found = false; |
712 | 712 | ||
713 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
713 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
714 | int lvds_reg; |
714 | int lvds_reg; |
715 | 715 | ||
716 | if (HAS_PCH_SPLIT(dev)) |
716 | if (HAS_PCH_SPLIT(dev)) |
717 | lvds_reg = PCH_LVDS; |
717 | lvds_reg = PCH_LVDS; |
718 | else |
718 | else |
719 | lvds_reg = LVDS; |
719 | lvds_reg = LVDS; |
720 | if (intel_is_dual_link_lvds(dev)) |
720 | if (intel_is_dual_link_lvds(dev)) |
721 | clock.p2 = limit->p2.p2_fast; |
721 | clock.p2 = limit->p2.p2_fast; |
722 | else |
722 | else |
723 | clock.p2 = limit->p2.p2_slow; |
723 | clock.p2 = limit->p2.p2_slow; |
724 | } else { |
724 | } else { |
725 | if (target < limit->p2.dot_limit) |
725 | if (target < limit->p2.dot_limit) |
726 | clock.p2 = limit->p2.p2_slow; |
726 | clock.p2 = limit->p2.p2_slow; |
727 | else |
727 | else |
728 | clock.p2 = limit->p2.p2_fast; |
728 | clock.p2 = limit->p2.p2_fast; |
729 | } |
729 | } |
730 | 730 | ||
731 | memset(best_clock, 0, sizeof(*best_clock)); |
731 | memset(best_clock, 0, sizeof(*best_clock)); |
732 | max_n = limit->n.max; |
732 | max_n = limit->n.max; |
733 | /* based on hardware requirement, prefer smaller n to precision */ |
733 | /* based on hardware requirement, prefer smaller n to precision */ |
734 | for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { |
734 | for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { |
735 | /* based on hardware requirement, prefere larger m1,m2 */ |
735 | /* based on hardware requirement, prefere larger m1,m2 */ |
736 | for (clock.m1 = limit->m1.max; |
736 | for (clock.m1 = limit->m1.max; |
737 | clock.m1 >= limit->m1.min; clock.m1--) { |
737 | clock.m1 >= limit->m1.min; clock.m1--) { |
738 | for (clock.m2 = limit->m2.max; |
738 | for (clock.m2 = limit->m2.max; |
739 | clock.m2 >= limit->m2.min; clock.m2--) { |
739 | clock.m2 >= limit->m2.min; clock.m2--) { |
740 | for (clock.p1 = limit->p1.max; |
740 | for (clock.p1 = limit->p1.max; |
741 | clock.p1 >= limit->p1.min; clock.p1--) { |
741 | clock.p1 >= limit->p1.min; clock.p1--) { |
742 | int this_err; |
742 | int this_err; |
743 | 743 | ||
744 | intel_clock(dev, refclk, &clock); |
744 | intel_clock(dev, refclk, &clock); |
745 | if (!intel_PLL_is_valid(dev, limit, |
745 | if (!intel_PLL_is_valid(dev, limit, |
746 | &clock)) |
746 | &clock)) |
747 | continue; |
747 | continue; |
748 | if (match_clock && |
748 | if (match_clock && |
749 | clock.p != match_clock->p) |
749 | clock.p != match_clock->p) |
750 | continue; |
750 | continue; |
751 | 751 | ||
752 | this_err = abs(clock.dot - target); |
752 | this_err = abs(clock.dot - target); |
753 | if (this_err < err_most) { |
753 | if (this_err < err_most) { |
754 | *best_clock = clock; |
754 | *best_clock = clock; |
755 | err_most = this_err; |
755 | err_most = this_err; |
756 | max_n = clock.n; |
756 | max_n = clock.n; |
757 | found = true; |
757 | found = true; |
758 | } |
758 | } |
759 | } |
759 | } |
760 | } |
760 | } |
761 | } |
761 | } |
762 | } |
762 | } |
763 | return found; |
763 | return found; |
764 | } |
764 | } |
765 | 765 | ||
766 | static bool |
766 | static bool |
767 | intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
767 | intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
768 | int target, int refclk, intel_clock_t *match_clock, |
768 | int target, int refclk, intel_clock_t *match_clock, |
769 | intel_clock_t *best_clock) |
769 | intel_clock_t *best_clock) |
770 | { |
770 | { |
771 | struct drm_device *dev = crtc->dev; |
771 | struct drm_device *dev = crtc->dev; |
772 | intel_clock_t clock; |
772 | intel_clock_t clock; |
773 | 773 | ||
774 | if (target < 200000) { |
774 | if (target < 200000) { |
775 | clock.n = 1; |
775 | clock.n = 1; |
776 | clock.p1 = 2; |
776 | clock.p1 = 2; |
777 | clock.p2 = 10; |
777 | clock.p2 = 10; |
778 | clock.m1 = 12; |
778 | clock.m1 = 12; |
779 | clock.m2 = 9; |
779 | clock.m2 = 9; |
780 | } else { |
780 | } else { |
781 | clock.n = 2; |
781 | clock.n = 2; |
782 | clock.p1 = 1; |
782 | clock.p1 = 1; |
783 | clock.p2 = 10; |
783 | clock.p2 = 10; |
784 | clock.m1 = 14; |
784 | clock.m1 = 14; |
785 | clock.m2 = 8; |
785 | clock.m2 = 8; |
786 | } |
786 | } |
787 | intel_clock(dev, refclk, &clock); |
787 | intel_clock(dev, refclk, &clock); |
788 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); |
788 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); |
789 | return true; |
789 | return true; |
790 | } |
790 | } |
791 | 791 | ||
792 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ |
792 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ |
793 | static bool |
793 | static bool |
794 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
794 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
795 | int target, int refclk, intel_clock_t *match_clock, |
795 | int target, int refclk, intel_clock_t *match_clock, |
796 | intel_clock_t *best_clock) |
796 | intel_clock_t *best_clock) |
797 | { |
797 | { |
798 | intel_clock_t clock; |
798 | intel_clock_t clock; |
799 | if (target < 200000) { |
799 | if (target < 200000) { |
800 | clock.p1 = 2; |
800 | clock.p1 = 2; |
801 | clock.p2 = 10; |
801 | clock.p2 = 10; |
802 | clock.n = 2; |
802 | clock.n = 2; |
803 | clock.m1 = 23; |
803 | clock.m1 = 23; |
804 | clock.m2 = 8; |
804 | clock.m2 = 8; |
805 | } else { |
805 | } else { |
806 | clock.p1 = 1; |
806 | clock.p1 = 1; |
807 | clock.p2 = 10; |
807 | clock.p2 = 10; |
808 | clock.n = 1; |
808 | clock.n = 1; |
809 | clock.m1 = 14; |
809 | clock.m1 = 14; |
810 | clock.m2 = 2; |
810 | clock.m2 = 2; |
811 | } |
811 | } |
812 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); |
812 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); |
813 | clock.p = (clock.p1 * clock.p2); |
813 | clock.p = (clock.p1 * clock.p2); |
814 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; |
814 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; |
815 | clock.vco = 0; |
815 | clock.vco = 0; |
816 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); |
816 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); |
817 | return true; |
817 | return true; |
818 | } |
818 | } |
819 | static bool |
819 | static bool |
820 | intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, |
820 | intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, |
821 | int target, int refclk, intel_clock_t *match_clock, |
821 | int target, int refclk, intel_clock_t *match_clock, |
822 | intel_clock_t *best_clock) |
822 | intel_clock_t *best_clock) |
823 | { |
823 | { |
824 | u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; |
824 | u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; |
825 | u32 m, n, fastclk; |
825 | u32 m, n, fastclk; |
826 | u32 updrate, minupdate, fracbits, p; |
826 | u32 updrate, minupdate, fracbits, p; |
827 | unsigned long bestppm, ppm, absppm; |
827 | unsigned long bestppm, ppm, absppm; |
828 | int dotclk, flag; |
828 | int dotclk, flag; |
829 | 829 | ||
830 | flag = 0; |
830 | flag = 0; |
831 | dotclk = target * 1000; |
831 | dotclk = target * 1000; |
832 | bestppm = 1000000; |
832 | bestppm = 1000000; |
833 | ppm = absppm = 0; |
833 | ppm = absppm = 0; |
834 | fastclk = dotclk / (2*100); |
834 | fastclk = dotclk / (2*100); |
835 | updrate = 0; |
835 | updrate = 0; |
836 | minupdate = 19200; |
836 | minupdate = 19200; |
837 | fracbits = 1; |
837 | fracbits = 1; |
838 | n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; |
838 | n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; |
839 | bestm1 = bestm2 = bestp1 = bestp2 = 0; |
839 | bestm1 = bestm2 = bestp1 = bestp2 = 0; |
840 | 840 | ||
841 | /* based on hardware requirement, prefer smaller n to precision */ |
841 | /* based on hardware requirement, prefer smaller n to precision */ |
842 | for (n = limit->n.min; n <= ((refclk) / minupdate); n++) { |
842 | for (n = limit->n.min; n <= ((refclk) / minupdate); n++) { |
843 | updrate = refclk / n; |
843 | updrate = refclk / n; |
844 | for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) { |
844 | for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) { |
845 | for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) { |
845 | for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) { |
846 | if (p2 > 10) |
846 | if (p2 > 10) |
847 | p2 = p2 - 1; |
847 | p2 = p2 - 1; |
848 | p = p1 * p2; |
848 | p = p1 * p2; |
849 | /* based on hardware requirement, prefer bigger m1,m2 values */ |
849 | /* based on hardware requirement, prefer bigger m1,m2 values */ |
850 | for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { |
850 | for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { |
851 | m2 = (((2*(fastclk * p * n / m1 )) + |
851 | m2 = (((2*(fastclk * p * n / m1 )) + |
852 | refclk) / (2*refclk)); |
852 | refclk) / (2*refclk)); |
853 | m = m1 * m2; |
853 | m = m1 * m2; |
854 | vco = updrate * m; |
854 | vco = updrate * m; |
855 | if (vco >= limit->vco.min && vco < limit->vco.max) { |
855 | if (vco >= limit->vco.min && vco < limit->vco.max) { |
856 | ppm = 1000000 * ((vco / p) - fastclk) / fastclk; |
856 | ppm = 1000000 * ((vco / p) - fastclk) / fastclk; |
857 | absppm = (ppm > 0) ? ppm : (-ppm); |
857 | absppm = (ppm > 0) ? ppm : (-ppm); |
858 | if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { |
858 | if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { |
859 | bestppm = 0; |
859 | bestppm = 0; |
860 | flag = 1; |
860 | flag = 1; |
861 | } |
861 | } |
862 | if (absppm < bestppm - 10) { |
862 | if (absppm < bestppm - 10) { |
863 | bestppm = absppm; |
863 | bestppm = absppm; |
864 | flag = 1; |
864 | flag = 1; |
865 | } |
865 | } |
866 | if (flag) { |
866 | if (flag) { |
867 | bestn = n; |
867 | bestn = n; |
868 | bestm1 = m1; |
868 | bestm1 = m1; |
869 | bestm2 = m2; |
869 | bestm2 = m2; |
870 | bestp1 = p1; |
870 | bestp1 = p1; |
871 | bestp2 = p2; |
871 | bestp2 = p2; |
872 | flag = 0; |
872 | flag = 0; |
873 | } |
873 | } |
874 | } |
874 | } |
875 | } |
875 | } |
876 | } |
876 | } |
877 | } |
877 | } |
878 | } |
878 | } |
879 | best_clock->n = bestn; |
879 | best_clock->n = bestn; |
880 | best_clock->m1 = bestm1; |
880 | best_clock->m1 = bestm1; |
881 | best_clock->m2 = bestm2; |
881 | best_clock->m2 = bestm2; |
882 | best_clock->p1 = bestp1; |
882 | best_clock->p1 = bestp1; |
883 | best_clock->p2 = bestp2; |
883 | best_clock->p2 = bestp2; |
884 | 884 | ||
885 | return true; |
885 | return true; |
886 | } |
886 | } |
887 | 887 | ||
888 | enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, |
888 | enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, |
889 | enum pipe pipe) |
889 | enum pipe pipe) |
890 | { |
890 | { |
891 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
891 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
892 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
892 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
893 | 893 | ||
894 | return intel_crtc->cpu_transcoder; |
894 | return intel_crtc->cpu_transcoder; |
895 | } |
895 | } |
896 | 896 | ||
897 | static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) |
897 | static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) |
898 | { |
898 | { |
899 | struct drm_i915_private *dev_priv = dev->dev_private; |
899 | struct drm_i915_private *dev_priv = dev->dev_private; |
900 | u32 frame, frame_reg = PIPEFRAME(pipe); |
900 | u32 frame, frame_reg = PIPEFRAME(pipe); |
901 | 901 | ||
902 | frame = I915_READ(frame_reg); |
902 | frame = I915_READ(frame_reg); |
903 | 903 | ||
904 | if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50)) |
904 | if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50)) |
905 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
905 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
906 | } |
906 | } |
907 | 907 | ||
908 | /** |
908 | /** |
909 | * intel_wait_for_vblank - wait for vblank on a given pipe |
909 | * intel_wait_for_vblank - wait for vblank on a given pipe |
910 | * @dev: drm device |
910 | * @dev: drm device |
911 | * @pipe: pipe to wait for |
911 | * @pipe: pipe to wait for |
912 | * |
912 | * |
913 | * Wait for vblank to occur on a given pipe. Needed for various bits of |
913 | * Wait for vblank to occur on a given pipe. Needed for various bits of |
914 | * mode setting code. |
914 | * mode setting code. |
915 | */ |
915 | */ |
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Gen5+ has no usable sticky vblank bit here; poll the frame
	 * counter instead. */
	if (INTEL_INFO(dev)->gen >= 5) {
		ironlake_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event. Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout. Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	/* PIPESTAT status bits are write-1-to-clear, hence OR-ing the
	 * bit back in clears it. */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
948 | 948 | ||
949 | /* |
949 | /* |
950 | * intel_wait_for_pipe_off - wait for pipe to turn off |
950 | * intel_wait_for_pipe_off - wait for pipe to turn off |
951 | * @dev: drm device |
951 | * @dev: drm device |
952 | * @pipe: pipe to wait for |
952 | * @pipe: pipe to wait for |
953 | * |
953 | * |
954 | * After disabling a pipe, we can't wait for vblank in the usual way, |
954 | * After disabling a pipe, we can't wait for vblank in the usual way, |
955 | * spinning on the vblank interrupt status bit, since we won't actually |
955 | * spinning on the vblank interrupt status bit, since we won't actually |
956 | * see an interrupt when the pipe is disabled. |
956 | * see an interrupt when the pipe is disabled. |
957 | * |
957 | * |
958 | * On Gen4 and above: |
958 | * On Gen4 and above: |
959 | * wait for the pipe register state bit to turn off |
959 | * wait for the pipe register state bit to turn off |
960 | * |
960 | * |
961 | * Otherwise: |
961 | * Otherwise: |
962 | * wait for the display line value to settle (it usually |
962 | * wait for the display line value to settle (it usually |
963 | * ends up stopping at the start of the next frame). |
963 | * ends up stopping at the start of the next frame). |
964 | * |
964 | * |
965 | */ |
965 | */ |
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		u32 last_line, line_mask;
		int reg = PIPEDSL(pipe);
		/* NOTE(review): GetTimerTicks() replaces jiffies here —
		 * presumably the host OS tick counter in this port; the
		 * msecs_to_jiffies() result is assumed to be in the same
		 * units.  TODO confirm. */
		unsigned long timeout = GetTimerTicks() + msecs_to_jiffies(100);

		/* Gen2 exposes fewer scanline bits than gen3/4. */
		if (IS_GEN2(dev))
			line_mask = DSL_LINEMASK_GEN2;
		else
			line_mask = DSL_LINEMASK_GEN3;

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & line_mask;
			mdelay(5);
		} while (((I915_READ(reg) & line_mask) != last_line) &&
			 time_after(timeout, GetTimerTicks()));
		if (time_after(GetTimerTicks(), timeout))
			WARN(1, "pipe_off wait timed out\n");
	}
}
999 | 999 | ||
1000 | /* |
1000 | /* |
1001 | * ibx_digital_port_connected - is the specified port connected? |
1001 | * ibx_digital_port_connected - is the specified port connected? |
1002 | * @dev_priv: i915 private structure |
1002 | * @dev_priv: i915 private structure |
1003 | * @port: the port to test |
1003 | * @port: the port to test |
1004 | * |
1004 | * |
1005 | * Returns true if @port is connected, false otherwise. |
1005 | * Returns true if @port is connected, false otherwise. |
1006 | */ |
1006 | */ |
1007 | bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, |
1007 | bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, |
1008 | struct intel_digital_port *port) |
1008 | struct intel_digital_port *port) |
1009 | { |
1009 | { |
1010 | u32 bit; |
1010 | u32 bit; |
1011 | 1011 | ||
1012 | if (HAS_PCH_IBX(dev_priv->dev)) { |
1012 | if (HAS_PCH_IBX(dev_priv->dev)) { |
1013 | switch(port->port) { |
1013 | switch(port->port) { |
1014 | case PORT_B: |
1014 | case PORT_B: |
1015 | bit = SDE_PORTB_HOTPLUG; |
1015 | bit = SDE_PORTB_HOTPLUG; |
1016 | break; |
1016 | break; |
1017 | case PORT_C: |
1017 | case PORT_C: |
1018 | bit = SDE_PORTC_HOTPLUG; |
1018 | bit = SDE_PORTC_HOTPLUG; |
1019 | break; |
1019 | break; |
1020 | case PORT_D: |
1020 | case PORT_D: |
1021 | bit = SDE_PORTD_HOTPLUG; |
1021 | bit = SDE_PORTD_HOTPLUG; |
1022 | break; |
1022 | break; |
1023 | default: |
1023 | default: |
1024 | return true; |
1024 | return true; |
1025 | } |
1025 | } |
1026 | } else { |
1026 | } else { |
1027 | switch(port->port) { |
1027 | switch(port->port) { |
1028 | case PORT_B: |
1028 | case PORT_B: |
1029 | bit = SDE_PORTB_HOTPLUG_CPT; |
1029 | bit = SDE_PORTB_HOTPLUG_CPT; |
1030 | break; |
1030 | break; |
1031 | case PORT_C: |
1031 | case PORT_C: |
1032 | bit = SDE_PORTC_HOTPLUG_CPT; |
1032 | bit = SDE_PORTC_HOTPLUG_CPT; |
1033 | break; |
1033 | break; |
1034 | case PORT_D: |
1034 | case PORT_D: |
1035 | bit = SDE_PORTD_HOTPLUG_CPT; |
1035 | bit = SDE_PORTD_HOTPLUG_CPT; |
1036 | break; |
1036 | break; |
1037 | default: |
1037 | default: |
1038 | return true; |
1038 | return true; |
1039 | } |
1039 | } |
1040 | } |
1040 | } |
1041 | 1041 | ||
1042 | return I915_READ(SDEISR) & bit; |
1042 | return I915_READ(SDEISR) & bit; |
1043 | } |
1043 | } |
1044 | 1044 | ||
/* Render an on/off state as a human-readable string for assert messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
1049 | 1049 | ||
1050 | /* Only for pre-ILK configs */ |
1050 | /* Only for pre-ILK configs */ |
1051 | static void assert_pll(struct drm_i915_private *dev_priv, |
1051 | static void assert_pll(struct drm_i915_private *dev_priv, |
1052 | enum pipe pipe, bool state) |
1052 | enum pipe pipe, bool state) |
1053 | { |
1053 | { |
1054 | int reg; |
1054 | int reg; |
1055 | u32 val; |
1055 | u32 val; |
1056 | bool cur_state; |
1056 | bool cur_state; |
1057 | 1057 | ||
1058 | reg = DPLL(pipe); |
1058 | reg = DPLL(pipe); |
1059 | val = I915_READ(reg); |
1059 | val = I915_READ(reg); |
1060 | cur_state = !!(val & DPLL_VCO_ENABLE); |
1060 | cur_state = !!(val & DPLL_VCO_ENABLE); |
1061 | WARN(cur_state != state, |
1061 | WARN(cur_state != state, |
1062 | "PLL state assertion failure (expected %s, current %s)\n", |
1062 | "PLL state assertion failure (expected %s, current %s)\n", |
1063 | state_string(state), state_string(cur_state)); |
1063 | state_string(state), state_string(cur_state)); |
1064 | } |
1064 | } |
1065 | #define assert_pll_enabled(d, p) assert_pll(d, p, true) |
1065 | #define assert_pll_enabled(d, p) assert_pll(d, p, true) |
1066 | #define assert_pll_disabled(d, p) assert_pll(d, p, false) |
1066 | #define assert_pll_disabled(d, p) assert_pll(d, p, false) |
1067 | 1067 | ||
1068 | /* For ILK+ */ |
1068 | /* For ILK+ */ |
/* For ILK+ */
/*
 * Assert that the given PCH PLL is in the expected enable @state, and —
 * on CPT, when a @crtc is supplied — that PCH_DPLL_SEL actually routes
 * this PLL to that crtc's transcoder.
 */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   struct intel_pch_pll *pll,
			   struct intel_crtc *crtc,
			   bool state)
{
	u32 val;
	bool cur_state;

	/* LPT has no PCH PLLs to check. */
	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
		return;
	}

	if (WARN (!pll,
		  "asserting PCH PLL %s with no PLL\n", state_string(state)))
		return;

	val = I915_READ(pll->pll_reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
	     pll->pll_reg, state_string(state), state_string(cur_state), val);

	/* Make sure the selected PLL is correctly attached to the transcoder */
	if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);
		/* cur_state here means "PLL B selected" (one select bit
		 * per transcoder, 4 bits apart in PCH_DPLL_SEL). */
		cur_state = pll->pll_reg == _PCH_DPLL_B;
		if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
			  "PLL[%d] not attached to this transcoder %d: %08x\n",
			  cur_state, crtc->pipe, pch_dpll)) {
			/* NOTE(review): this shifts `val` (the DPLL register
			 * value read above), not `pch_dpll`, and `!!` without
			 * a `& 1` mask is true if ANY bit at or above
			 * position (4*pipe+3) is set.  Looks suspicious —
			 * confirm against the PCH_DPLL_SEL bit layout. */
			cur_state = !!(val >> (4*crtc->pipe + 3));
			WARN(cur_state != state,
			     "PLL[%d] not %s on this transcoder %d: %08x\n",
			     pll->pll_reg == _PCH_DPLL_B,
			     state_string(state),
			     crtc->pipe,
			     val);
		}
	}
}
#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
1111 | #define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true) |
1111 | #define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true) |
1112 | #define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false) |
1112 | #define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false) |
1113 | 1113 | ||
1114 | static void assert_fdi_tx(struct drm_i915_private *dev_priv, |
1114 | static void assert_fdi_tx(struct drm_i915_private *dev_priv, |
1115 | enum pipe pipe, bool state) |
1115 | enum pipe pipe, bool state) |
1116 | { |
1116 | { |
1117 | int reg; |
1117 | int reg; |
1118 | u32 val; |
1118 | u32 val; |
1119 | bool cur_state; |
1119 | bool cur_state; |
1120 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
1120 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
1121 | pipe); |
1121 | pipe); |
1122 | 1122 | ||
1123 | if (HAS_DDI(dev_priv->dev)) { |
1123 | if (HAS_DDI(dev_priv->dev)) { |
1124 | /* DDI does not have a specific FDI_TX register */ |
1124 | /* DDI does not have a specific FDI_TX register */ |
1125 | reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); |
1125 | reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); |
1126 | val = I915_READ(reg); |
1126 | val = I915_READ(reg); |
1127 | cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); |
1127 | cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); |
1128 | } else { |
1128 | } else { |
1129 | reg = FDI_TX_CTL(pipe); |
1129 | reg = FDI_TX_CTL(pipe); |
1130 | val = I915_READ(reg); |
1130 | val = I915_READ(reg); |
1131 | cur_state = !!(val & FDI_TX_ENABLE); |
1131 | cur_state = !!(val & FDI_TX_ENABLE); |
1132 | } |
1132 | } |
1133 | WARN(cur_state != state, |
1133 | WARN(cur_state != state, |
1134 | "FDI TX state assertion failure (expected %s, current %s)\n", |
1134 | "FDI TX state assertion failure (expected %s, current %s)\n", |
1135 | state_string(state), state_string(cur_state)); |
1135 | state_string(state), state_string(cur_state)); |
1136 | } |
1136 | } |
1137 | #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) |
1137 | #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) |
1138 | #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) |
1138 | #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) |
1139 | 1139 | ||
1140 | static void assert_fdi_rx(struct drm_i915_private *dev_priv, |
1140 | static void assert_fdi_rx(struct drm_i915_private *dev_priv, |
1141 | enum pipe pipe, bool state) |
1141 | enum pipe pipe, bool state) |
1142 | { |
1142 | { |
1143 | int reg; |
1143 | int reg; |
1144 | u32 val; |
1144 | u32 val; |
1145 | bool cur_state; |
1145 | bool cur_state; |
1146 | 1146 | ||
1147 | reg = FDI_RX_CTL(pipe); |
1147 | reg = FDI_RX_CTL(pipe); |
1148 | val = I915_READ(reg); |
1148 | val = I915_READ(reg); |
1149 | cur_state = !!(val & FDI_RX_ENABLE); |
1149 | cur_state = !!(val & FDI_RX_ENABLE); |
1150 | WARN(cur_state != state, |
1150 | WARN(cur_state != state, |
1151 | "FDI RX state assertion failure (expected %s, current %s)\n", |
1151 | "FDI RX state assertion failure (expected %s, current %s)\n", |
1152 | state_string(state), state_string(cur_state)); |
1152 | state_string(state), state_string(cur_state)); |
1153 | } |
1153 | } |
1154 | #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) |
1154 | #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) |
1155 | #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) |
1155 | #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) |
1156 | 1156 | ||
1157 | static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, |
1157 | static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, |
1158 | enum pipe pipe) |
1158 | enum pipe pipe) |
1159 | { |
1159 | { |
1160 | int reg; |
1160 | int reg; |
1161 | u32 val; |
1161 | u32 val; |
1162 | 1162 | ||
1163 | /* ILK FDI PLL is always enabled */ |
1163 | /* ILK FDI PLL is always enabled */ |
1164 | if (dev_priv->info->gen == 5) |
1164 | if (dev_priv->info->gen == 5) |
1165 | return; |
1165 | return; |
1166 | 1166 | ||
1167 | /* On Haswell, DDI ports are responsible for the FDI PLL setup */ |
1167 | /* On Haswell, DDI ports are responsible for the FDI PLL setup */ |
1168 | if (HAS_DDI(dev_priv->dev)) |
1168 | if (HAS_DDI(dev_priv->dev)) |
1169 | return; |
1169 | return; |
1170 | 1170 | ||
1171 | reg = FDI_TX_CTL(pipe); |
1171 | reg = FDI_TX_CTL(pipe); |
1172 | val = I915_READ(reg); |
1172 | val = I915_READ(reg); |
1173 | WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); |
1173 | WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); |
1174 | } |
1174 | } |
1175 | 1175 | ||
1176 | static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, |
1176 | static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, |
1177 | enum pipe pipe) |
1177 | enum pipe pipe) |
1178 | { |
1178 | { |
1179 | int reg; |
1179 | int reg; |
1180 | u32 val; |
1180 | u32 val; |
1181 | 1181 | ||
1182 | reg = FDI_RX_CTL(pipe); |
1182 | reg = FDI_RX_CTL(pipe); |
1183 | val = I915_READ(reg); |
1183 | val = I915_READ(reg); |
1184 | WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); |
1184 | WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); |
1185 | } |
1185 | } |
1186 | 1186 | ||
/*
 * Assert that the panel-power-sequencer registers for @pipe are writable:
 * either the panel is off, or the unlock key is present in PP_CONTROL.
 * Only warns if the locked panel actually drives this pipe.
 */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* PCH platforms keep the panel-power and LVDS controls in the PCH
	 * register range. */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	/* Registers count as unlocked if panel power is off or the
	 * unlock key bits are set. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	/* Figure out which pipe the LVDS panel is attached to. */
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1215 | 1215 | ||
/*
 * Assert that @pipe is in the expected enable @state, using the pipe's
 * CPU transcoder's PIPECONF enable bit.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	/* On Haswell, reading PIPECONF is only safe when the power well is
	 * up (except for the always-on eDP transcoder); with the well down
	 * the pipe is treated as off rather than touching the register. */
	if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP &&
	    !(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) {
		cur_state = false;
	} else {
		reg = PIPECONF(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & PIPECONF_ENABLE);
	}

	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
1242 | 1242 | ||
1243 | static void assert_plane(struct drm_i915_private *dev_priv, |
1243 | static void assert_plane(struct drm_i915_private *dev_priv, |
1244 | enum plane plane, bool state) |
1244 | enum plane plane, bool state) |
1245 | { |
1245 | { |
1246 | int reg; |
1246 | int reg; |
1247 | u32 val; |
1247 | u32 val; |
1248 | bool cur_state; |
1248 | bool cur_state; |
1249 | 1249 | ||
1250 | reg = DSPCNTR(plane); |
1250 | reg = DSPCNTR(plane); |
1251 | val = I915_READ(reg); |
1251 | val = I915_READ(reg); |
1252 | cur_state = !!(val & DISPLAY_PLANE_ENABLE); |
1252 | cur_state = !!(val & DISPLAY_PLANE_ENABLE); |
1253 | WARN(cur_state != state, |
1253 | WARN(cur_state != state, |
1254 | "plane %c assertion failure (expected %s, current %s)\n", |
1254 | "plane %c assertion failure (expected %s, current %s)\n", |
1255 | plane_name(plane), state_string(state), state_string(cur_state)); |
1255 | plane_name(plane), state_string(state), state_string(cur_state)); |
1256 | } |
1256 | } |
1257 | 1257 | ||
1258 | #define assert_plane_enabled(d, p) assert_plane(d, p, true) |
1258 | #define assert_plane_enabled(d, p) assert_plane(d, p, true) |
1259 | #define assert_plane_disabled(d, p) assert_plane(d, p, false) |
1259 | #define assert_plane_disabled(d, p) assert_plane(d, p, false) |
1260 | 1260 | ||
/*
 * Assert that no display plane is enabled and scanning out from @pipe.
 * On pre-ILK hardware a plane can be routed to either pipe, so both
 * planes must be checked against the pipe-select field.
 */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		/* plane index == pipe index here, hence plane_name(pipe) */
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN((val & DISPLAY_PLANE_ENABLE),
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		/* Only a plane that is both enabled AND selected onto this
		 * pipe is a violation. */
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
1289 | 1289 | ||
1290 | static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) |
1290 | static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) |
1291 | { |
1291 | { |
1292 | u32 val; |
1292 | u32 val; |
1293 | bool enabled; |
1293 | bool enabled; |
1294 | 1294 | ||
1295 | if (HAS_PCH_LPT(dev_priv->dev)) { |
1295 | if (HAS_PCH_LPT(dev_priv->dev)) { |
1296 | DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n"); |
1296 | DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n"); |
1297 | return; |
1297 | return; |
1298 | } |
1298 | } |
1299 | 1299 | ||
1300 | val = I915_READ(PCH_DREF_CONTROL); |
1300 | val = I915_READ(PCH_DREF_CONTROL); |
1301 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | |
1301 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | |
1302 | DREF_SUPERSPREAD_SOURCE_MASK)); |
1302 | DREF_SUPERSPREAD_SOURCE_MASK)); |
1303 | WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); |
1303 | WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); |
1304 | } |
1304 | } |
1305 | 1305 | ||
1306 | static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, |
1306 | static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, |
1307 | enum pipe pipe) |
1307 | enum pipe pipe) |
1308 | { |
1308 | { |
1309 | int reg; |
1309 | int reg; |
1310 | u32 val; |
1310 | u32 val; |
1311 | bool enabled; |
1311 | bool enabled; |
1312 | 1312 | ||
1313 | reg = TRANSCONF(pipe); |
1313 | reg = TRANSCONF(pipe); |
1314 | val = I915_READ(reg); |
1314 | val = I915_READ(reg); |
1315 | enabled = !!(val & TRANS_ENABLE); |
1315 | enabled = !!(val & TRANS_ENABLE); |
1316 | WARN(enabled, |
1316 | WARN(enabled, |
1317 | "transcoder assertion failed, should be off on pipe %c but is still active\n", |
1317 | "transcoder assertion failed, should be off on pipe %c but is still active\n", |
1318 | pipe_name(pipe)); |
1318 | pipe_name(pipe)); |
1319 | } |
1319 | } |
1320 | 1320 | ||
1321 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, |
1321 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, |
1322 | enum pipe pipe, u32 port_sel, u32 val) |
1322 | enum pipe pipe, u32 port_sel, u32 val) |
1323 | { |
1323 | { |
1324 | if ((val & DP_PORT_EN) == 0) |
1324 | if ((val & DP_PORT_EN) == 0) |
1325 | return false; |
1325 | return false; |
1326 | 1326 | ||
1327 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1327 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1328 | u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); |
1328 | u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); |
1329 | u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); |
1329 | u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); |
1330 | if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) |
1330 | if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) |
1331 | return false; |
1331 | return false; |
1332 | } else { |
1332 | } else { |
1333 | if ((val & DP_PIPE_MASK) != (pipe << 30)) |
1333 | if ((val & DP_PIPE_MASK) != (pipe << 30)) |
1334 | return false; |
1334 | return false; |
1335 | } |
1335 | } |
1336 | return true; |
1336 | return true; |
1337 | } |
1337 | } |
1338 | 1338 | ||
1339 | static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, |
1339 | static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, |
1340 | enum pipe pipe, u32 val) |
1340 | enum pipe pipe, u32 val) |
1341 | { |
1341 | { |
1342 | if ((val & PORT_ENABLE) == 0) |
1342 | if ((val & PORT_ENABLE) == 0) |
1343 | return false; |
1343 | return false; |
1344 | 1344 | ||
1345 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1345 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1346 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1346 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1347 | return false; |
1347 | return false; |
1348 | } else { |
1348 | } else { |
1349 | if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) |
1349 | if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) |
1350 | return false; |
1350 | return false; |
1351 | } |
1351 | } |
1352 | return true; |
1352 | return true; |
1353 | } |
1353 | } |
1354 | 1354 | ||
1355 | static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, |
1355 | static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, |
1356 | enum pipe pipe, u32 val) |
1356 | enum pipe pipe, u32 val) |
1357 | { |
1357 | { |
1358 | if ((val & LVDS_PORT_EN) == 0) |
1358 | if ((val & LVDS_PORT_EN) == 0) |
1359 | return false; |
1359 | return false; |
1360 | 1360 | ||
1361 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1361 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1362 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1362 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1363 | return false; |
1363 | return false; |
1364 | } else { |
1364 | } else { |
1365 | if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) |
1365 | if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) |
1366 | return false; |
1366 | return false; |
1367 | } |
1367 | } |
1368 | return true; |
1368 | return true; |
1369 | } |
1369 | } |
1370 | 1370 | ||
1371 | static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, |
1371 | static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, |
1372 | enum pipe pipe, u32 val) |
1372 | enum pipe pipe, u32 val) |
1373 | { |
1373 | { |
1374 | if ((val & ADPA_DAC_ENABLE) == 0) |
1374 | if ((val & ADPA_DAC_ENABLE) == 0) |
1375 | return false; |
1375 | return false; |
1376 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1376 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1377 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1377 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1378 | return false; |
1378 | return false; |
1379 | } else { |
1379 | } else { |
1380 | if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) |
1380 | if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) |
1381 | return false; |
1381 | return false; |
1382 | } |
1382 | } |
1383 | return true; |
1383 | return true; |
1384 | } |
1384 | } |
1385 | 1385 | ||
/*
 * Assert that the PCH DP port at @reg is not enabled on transcoder @pipe,
 * and (IBX only) that a disabled port is not left selecting transcoder B,
 * which would confuse a later enable.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1398 | 1398 | ||
1399 | static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, |
1399 | static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, |
1400 | enum pipe pipe, int reg) |
1400 | enum pipe pipe, int reg) |
1401 | { |
1401 | { |
1402 | u32 val = I915_READ(reg); |
1402 | u32 val = I915_READ(reg); |
1403 | WARN(hdmi_pipe_enabled(dev_priv, pipe, val), |
1403 | WARN(hdmi_pipe_enabled(dev_priv, pipe, val), |
1404 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", |
1404 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", |
1405 | reg, pipe_name(pipe)); |
1405 | reg, pipe_name(pipe)); |
1406 | 1406 | ||
1407 | WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0 |
1407 | WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0 |
1408 | && (val & SDVO_PIPE_B_SELECT), |
1408 | && (val & SDVO_PIPE_B_SELECT), |
1409 | "IBX PCH hdmi port still using transcoder B\n"); |
1409 | "IBX PCH hdmi port still using transcoder B\n"); |
1410 | } |
1410 | } |
1411 | 1411 | ||
1412 | static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, |
1412 | static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, |
1413 | enum pipe pipe) |
1413 | enum pipe pipe) |
1414 | { |
1414 | { |
1415 | int reg; |
1415 | int reg; |
1416 | u32 val; |
1416 | u32 val; |
1417 | 1417 | ||
1418 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); |
1418 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); |
1419 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); |
1419 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); |
1420 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); |
1420 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); |
1421 | 1421 | ||
1422 | reg = PCH_ADPA; |
1422 | reg = PCH_ADPA; |
1423 | val = I915_READ(reg); |
1423 | val = I915_READ(reg); |
1424 | WARN(adpa_pipe_enabled(dev_priv, pipe, val), |
1424 | WARN(adpa_pipe_enabled(dev_priv, pipe, val), |
1425 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
1425 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
1426 | pipe_name(pipe)); |
1426 | pipe_name(pipe)); |
1427 | 1427 | ||
1428 | reg = PCH_LVDS; |
1428 | reg = PCH_LVDS; |
1429 | val = I915_READ(reg); |
1429 | val = I915_READ(reg); |
1430 | WARN(lvds_pipe_enabled(dev_priv, pipe, val), |
1430 | WARN(lvds_pipe_enabled(dev_priv, pipe, val), |
1431 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1431 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1432 | pipe_name(pipe)); |
1432 | pipe_name(pipe)); |
1433 | 1433 | ||
1434 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); |
1434 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); |
1435 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); |
1435 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); |
1436 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); |
1436 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); |
1437 | } |
1437 | } |
1438 | 1438 | ||
1439 | /** |
1439 | /** |
1440 | * intel_enable_pll - enable a PLL |
1440 | * intel_enable_pll - enable a PLL |
1441 | * @dev_priv: i915 private structure |
1441 | * @dev_priv: i915 private structure |
1442 | * @pipe: pipe PLL to enable |
1442 | * @pipe: pipe PLL to enable |
1443 | * |
1443 | * |
1444 | * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to |
1444 | * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to |
1445 | * make sure the PLL reg is writable first though, since the panel write |
1445 | * make sure the PLL reg is writable first though, since the panel write |
1446 | * protect mechanism may be enabled. |
1446 | * protect mechanism may be enabled. |
1447 | * |
1447 | * |
1448 | * Note! This is for pre-ILK only. |
1448 | * Note! This is for pre-ILK only. |
1449 | * |
1449 | * |
1450 | * Unfortunately needed by dvo_ns2501 since the dvo depends on it running. |
1450 | * Unfortunately needed by dvo_ns2501 since the dvo depends on it running. |
1451 | */ |
1451 | */ |
1452 | static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) |
1452 | static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) |
1453 | { |
1453 | { |
1454 | int reg; |
1454 | int reg; |
1455 | u32 val; |
1455 | u32 val; |
1456 | 1456 | ||
1457 | /* No really, not for ILK+ */ |
1457 | /* No really, not for ILK+ */ |
1458 | BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); |
1458 | BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); |
1459 | 1459 | ||
1460 | /* PLL is protected by panel, make sure we can write it */ |
1460 | /* PLL is protected by panel, make sure we can write it */ |
1461 | if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) |
1461 | if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) |
1462 | assert_panel_unlocked(dev_priv, pipe); |
1462 | assert_panel_unlocked(dev_priv, pipe); |
1463 | 1463 | ||
1464 | reg = DPLL(pipe); |
1464 | reg = DPLL(pipe); |
1465 | val = I915_READ(reg); |
1465 | val = I915_READ(reg); |
1466 | val |= DPLL_VCO_ENABLE; |
1466 | val |= DPLL_VCO_ENABLE; |
1467 | 1467 | ||
1468 | /* We do this three times for luck */ |
1468 | /* We do this three times for luck */ |
1469 | I915_WRITE(reg, val); |
1469 | I915_WRITE(reg, val); |
1470 | POSTING_READ(reg); |
1470 | POSTING_READ(reg); |
1471 | udelay(150); /* wait for warmup */ |
1471 | udelay(150); /* wait for warmup */ |
1472 | I915_WRITE(reg, val); |
1472 | I915_WRITE(reg, val); |
1473 | POSTING_READ(reg); |
1473 | POSTING_READ(reg); |
1474 | udelay(150); /* wait for warmup */ |
1474 | udelay(150); /* wait for warmup */ |
1475 | I915_WRITE(reg, val); |
1475 | I915_WRITE(reg, val); |
1476 | POSTING_READ(reg); |
1476 | POSTING_READ(reg); |
1477 | udelay(150); /* wait for warmup */ |
1477 | udelay(150); /* wait for warmup */ |
1478 | } |
1478 | } |
1479 | 1479 | ||
1480 | /** |
1480 | /** |
1481 | * intel_disable_pll - disable a PLL |
1481 | * intel_disable_pll - disable a PLL |
1482 | * @dev_priv: i915 private structure |
1482 | * @dev_priv: i915 private structure |
1483 | * @pipe: pipe PLL to disable |
1483 | * @pipe: pipe PLL to disable |
1484 | * |
1484 | * |
1485 | * Disable the PLL for @pipe, making sure the pipe is off first. |
1485 | * Disable the PLL for @pipe, making sure the pipe is off first. |
1486 | * |
1486 | * |
1487 | * Note! This is for pre-ILK only. |
1487 | * Note! This is for pre-ILK only. |
1488 | */ |
1488 | */ |
1489 | static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) |
1489 | static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) |
1490 | { |
1490 | { |
1491 | int reg; |
1491 | int reg; |
1492 | u32 val; |
1492 | u32 val; |
1493 | 1493 | ||
1494 | /* Don't disable pipe A or pipe A PLLs if needed */ |
1494 | /* Don't disable pipe A or pipe A PLLs if needed */ |
1495 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) |
1495 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) |
1496 | return; |
1496 | return; |
1497 | 1497 | ||
1498 | /* Make sure the pipe isn't still relying on us */ |
1498 | /* Make sure the pipe isn't still relying on us */ |
1499 | assert_pipe_disabled(dev_priv, pipe); |
1499 | assert_pipe_disabled(dev_priv, pipe); |
1500 | 1500 | ||
1501 | reg = DPLL(pipe); |
1501 | reg = DPLL(pipe); |
1502 | val = I915_READ(reg); |
1502 | val = I915_READ(reg); |
1503 | val &= ~DPLL_VCO_ENABLE; |
1503 | val &= ~DPLL_VCO_ENABLE; |
1504 | I915_WRITE(reg, val); |
1504 | I915_WRITE(reg, val); |
1505 | POSTING_READ(reg); |
1505 | POSTING_READ(reg); |
1506 | } |
1506 | } |
1507 | 1507 | ||
1508 | /* SBI access */ |
1508 | /* SBI access */ |
1509 | static void |
1509 | static void |
1510 | intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, |
1510 | intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, |
1511 | enum intel_sbi_destination destination) |
1511 | enum intel_sbi_destination destination) |
1512 | { |
1512 | { |
1513 | u32 tmp; |
1513 | u32 tmp; |
1514 | 1514 | ||
1515 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
1515 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
1516 | 1516 | ||
1517 | if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, |
1517 | if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, |
1518 | 100)) { |
1518 | 100)) { |
1519 | DRM_ERROR("timeout waiting for SBI to become ready\n"); |
1519 | DRM_ERROR("timeout waiting for SBI to become ready\n"); |
1520 | return; |
1520 | return; |
1521 | } |
1521 | } |
1522 | 1522 | ||
1523 | I915_WRITE(SBI_ADDR, (reg << 16)); |
1523 | I915_WRITE(SBI_ADDR, (reg << 16)); |
1524 | I915_WRITE(SBI_DATA, value); |
1524 | I915_WRITE(SBI_DATA, value); |
1525 | 1525 | ||
1526 | if (destination == SBI_ICLK) |
1526 | if (destination == SBI_ICLK) |
1527 | tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR; |
1527 | tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR; |
1528 | else |
1528 | else |
1529 | tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR; |
1529 | tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR; |
1530 | I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp); |
1530 | I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp); |
1531 | 1531 | ||
1532 | if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, |
1532 | if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, |
1533 | 100)) { |
1533 | 100)) { |
1534 | DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); |
1534 | DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); |
1535 | return; |
1535 | return; |
1536 | } |
1536 | } |
1537 | } |
1537 | } |
1538 | 1538 | ||
1539 | static u32 |
1539 | static u32 |
1540 | intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, |
1540 | intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, |
1541 | enum intel_sbi_destination destination) |
1541 | enum intel_sbi_destination destination) |
1542 | { |
1542 | { |
1543 | u32 value = 0; |
1543 | u32 value = 0; |
1544 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
1544 | WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
1545 | 1545 | ||
1546 | if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, |
1546 | if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, |
1547 | 100)) { |
1547 | 100)) { |
1548 | DRM_ERROR("timeout waiting for SBI to become ready\n"); |
1548 | DRM_ERROR("timeout waiting for SBI to become ready\n"); |
1549 | return 0; |
1549 | return 0; |
1550 | } |
1550 | } |
1551 | 1551 | ||
1552 | I915_WRITE(SBI_ADDR, (reg << 16)); |
1552 | I915_WRITE(SBI_ADDR, (reg << 16)); |
1553 | 1553 | ||
1554 | if (destination == SBI_ICLK) |
1554 | if (destination == SBI_ICLK) |
1555 | value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD; |
1555 | value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD; |
1556 | else |
1556 | else |
1557 | value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; |
1557 | value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; |
1558 | I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY); |
1558 | I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY); |
1559 | 1559 | ||
1560 | if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, |
1560 | if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, |
1561 | 100)) { |
1561 | 100)) { |
1562 | DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); |
1562 | DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); |
1563 | return 0; |
1563 | return 0; |
1564 | } |
1564 | } |
1565 | 1565 | ||
1566 | return I915_READ(SBI_DATA); |
1566 | return I915_READ(SBI_DATA); |
1567 | } |
1567 | } |
1568 | 1568 | ||
/**
 * ironlake_enable_pch_pll - enable PCH PLL
 * @intel_crtc: crtc whose shared PCH PLL (if any) should be enabled
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.  PLLs are shared and refcounted: only the
 * first active user actually turns the hardware on.
 */
static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll;
	int reg;
	u32 val;

	/* PCH PLLs only available on ILK, SNB and IVB */
	BUG_ON(dev_priv->info->gen < 5);
	pll = intel_crtc->pch_pll;
	if (pll == NULL)
		return;

	/* A crtc holding the PLL must have taken a reference first. */
	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
		      pll->pll_reg, pll->active, pll->on,
		      intel_crtc->base.base.id);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	/* Bump the active count; if another crtc already enabled the PLL
	 * and it is running, just sanity-check and return. */
	if (pll->active++ && pll->on) {
		assert_pch_pll_enabled(dev_priv, pll, NULL);
		return;
	}

	DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);

	reg = pll->pll_reg;
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200); /* give the VCO time to lock */

	pll->on = true;
}
1616 | 1616 | ||
/*
 * Drop @intel_crtc's use of its shared PCH PLL; only the last active user
 * actually turns the hardware off.  The PCH transcoder must already be
 * disabled, since it is clocked by this PLL.
 */
static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll = intel_crtc->pch_pll;
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);
	if (pll == NULL)
		return;

	/* A crtc holding the PLL must have taken a reference first. */
	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
		      pll->pll_reg, pll->active, pll->on,
		      intel_crtc->base.base.id);

	/* Unbalanced disable: the PLL should at least be off already. */
	if (WARN_ON(pll->active == 0)) {
		assert_pch_pll_disabled(dev_priv, pll, NULL);
		return;
	}

	/* Other crtcs still use the PLL; it must stay running. */
	if (--pll->active) {
		assert_pch_pll_enabled(dev_priv, pll, NULL);
		return;
	}

	DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, intel_crtc->pipe);

	reg = pll->pll_reg;
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);

	pll->on = false;
}
1660 | 1660 | ||
1661 | static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, |
1661 | static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, |
1662 | enum pipe pipe) |
1662 | enum pipe pipe) |
1663 | { |
1663 | { |
1664 | struct drm_device *dev = dev_priv->dev; |
1664 | struct drm_device *dev = dev_priv->dev; |
1665 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
1665 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
1666 | uint32_t reg, val, pipeconf_val; |
1666 | uint32_t reg, val, pipeconf_val; |
1667 | 1667 | ||
1668 | /* PCH only available on ILK+ */ |
1668 | /* PCH only available on ILK+ */ |
1669 | BUG_ON(dev_priv->info->gen < 5); |
1669 | BUG_ON(dev_priv->info->gen < 5); |
1670 | 1670 | ||
1671 | /* Make sure PCH DPLL is enabled */ |
1671 | /* Make sure PCH DPLL is enabled */ |
1672 | assert_pch_pll_enabled(dev_priv, |
1672 | assert_pch_pll_enabled(dev_priv, |
1673 | to_intel_crtc(crtc)->pch_pll, |
1673 | to_intel_crtc(crtc)->pch_pll, |
1674 | to_intel_crtc(crtc)); |
1674 | to_intel_crtc(crtc)); |
1675 | 1675 | ||
1676 | /* FDI must be feeding us bits for PCH ports */ |
1676 | /* FDI must be feeding us bits for PCH ports */ |
1677 | assert_fdi_tx_enabled(dev_priv, pipe); |
1677 | assert_fdi_tx_enabled(dev_priv, pipe); |
1678 | assert_fdi_rx_enabled(dev_priv, pipe); |
1678 | assert_fdi_rx_enabled(dev_priv, pipe); |
1679 | 1679 | ||
1680 | if (HAS_PCH_CPT(dev)) { |
1680 | if (HAS_PCH_CPT(dev)) { |
1681 | /* Workaround: Set the timing override bit before enabling the |
1681 | /* Workaround: Set the timing override bit before enabling the |
1682 | * pch transcoder. */ |
1682 | * pch transcoder. */ |
1683 | reg = TRANS_CHICKEN2(pipe); |
1683 | reg = TRANS_CHICKEN2(pipe); |
1684 | val = I915_READ(reg); |
1684 | val = I915_READ(reg); |
1685 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; |
1685 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; |
1686 | I915_WRITE(reg, val); |
1686 | I915_WRITE(reg, val); |
1687 | } |
1687 | } |
1688 | 1688 | ||
1689 | reg = TRANSCONF(pipe); |
1689 | reg = TRANSCONF(pipe); |
1690 | val = I915_READ(reg); |
1690 | val = I915_READ(reg); |
1691 | pipeconf_val = I915_READ(PIPECONF(pipe)); |
1691 | pipeconf_val = I915_READ(PIPECONF(pipe)); |
1692 | 1692 | ||
1693 | if (HAS_PCH_IBX(dev_priv->dev)) { |
1693 | if (HAS_PCH_IBX(dev_priv->dev)) { |
1694 | /* |
1694 | /* |
1695 | * make the BPC in transcoder be consistent with |
1695 | * make the BPC in transcoder be consistent with |
1696 | * that in pipeconf reg. |
1696 | * that in pipeconf reg. |
1697 | */ |
1697 | */ |
1698 | val &= ~PIPECONF_BPC_MASK; |
1698 | val &= ~PIPECONF_BPC_MASK; |
1699 | val |= pipeconf_val & PIPECONF_BPC_MASK; |
1699 | val |= pipeconf_val & PIPECONF_BPC_MASK; |
1700 | } |
1700 | } |
1701 | 1701 | ||
1702 | val &= ~TRANS_INTERLACE_MASK; |
1702 | val &= ~TRANS_INTERLACE_MASK; |
1703 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) |
1703 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) |
1704 | if (HAS_PCH_IBX(dev_priv->dev) && |
1704 | if (HAS_PCH_IBX(dev_priv->dev) && |
1705 | intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) |
1705 | intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) |
1706 | val |= TRANS_LEGACY_INTERLACED_ILK; |
1706 | val |= TRANS_LEGACY_INTERLACED_ILK; |
1707 | else |
1707 | else |
1708 | val |= TRANS_INTERLACED; |
1708 | val |= TRANS_INTERLACED; |
1709 | else |
1709 | else |
1710 | val |= TRANS_PROGRESSIVE; |
1710 | val |= TRANS_PROGRESSIVE; |
1711 | 1711 | ||
1712 | I915_WRITE(reg, val | TRANS_ENABLE); |
1712 | I915_WRITE(reg, val | TRANS_ENABLE); |
1713 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) |
1713 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) |
1714 | DRM_ERROR("failed to enable transcoder %d\n", pipe); |
1714 | DRM_ERROR("failed to enable transcoder %d\n", pipe); |
1715 | } |
1715 | } |
1716 | 1716 | ||
1717 | static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, |
1717 | static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, |
1718 | enum transcoder cpu_transcoder) |
1718 | enum transcoder cpu_transcoder) |
1719 | { |
1719 | { |
1720 | u32 val, pipeconf_val; |
1720 | u32 val, pipeconf_val; |
1721 | 1721 | ||
1722 | /* PCH only available on ILK+ */ |
1722 | /* PCH only available on ILK+ */ |
1723 | BUG_ON(dev_priv->info->gen < 5); |
1723 | BUG_ON(dev_priv->info->gen < 5); |
1724 | 1724 | ||
1725 | /* FDI must be feeding us bits for PCH ports */ |
1725 | /* FDI must be feeding us bits for PCH ports */ |
1726 | assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); |
1726 | assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); |
1727 | assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); |
1727 | assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); |
1728 | 1728 | ||
1729 | /* Workaround: set timing override bit. */ |
1729 | /* Workaround: set timing override bit. */ |
1730 | val = I915_READ(_TRANSA_CHICKEN2); |
1730 | val = I915_READ(_TRANSA_CHICKEN2); |
1731 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; |
1731 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; |
1732 | I915_WRITE(_TRANSA_CHICKEN2, val); |
1732 | I915_WRITE(_TRANSA_CHICKEN2, val); |
1733 | 1733 | ||
1734 | val = TRANS_ENABLE; |
1734 | val = TRANS_ENABLE; |
1735 | pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); |
1735 | pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); |
1736 | 1736 | ||
1737 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == |
1737 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == |
1738 | PIPECONF_INTERLACED_ILK) |
1738 | PIPECONF_INTERLACED_ILK) |
1739 | val |= TRANS_INTERLACED; |
1739 | val |= TRANS_INTERLACED; |
1740 | else |
1740 | else |
1741 | val |= TRANS_PROGRESSIVE; |
1741 | val |= TRANS_PROGRESSIVE; |
1742 | 1742 | ||
1743 | I915_WRITE(TRANSCONF(TRANSCODER_A), val); |
1743 | I915_WRITE(TRANSCONF(TRANSCODER_A), val); |
1744 | if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100)) |
1744 | if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100)) |
1745 | DRM_ERROR("Failed to enable PCH transcoder\n"); |
1745 | DRM_ERROR("Failed to enable PCH transcoder\n"); |
1746 | } |
1746 | } |
1747 | 1747 | ||
1748 | static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, |
1748 | static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, |
1749 | enum pipe pipe) |
1749 | enum pipe pipe) |
1750 | { |
1750 | { |
1751 | struct drm_device *dev = dev_priv->dev; |
1751 | struct drm_device *dev = dev_priv->dev; |
1752 | uint32_t reg, val; |
1752 | uint32_t reg, val; |
1753 | 1753 | ||
1754 | /* FDI relies on the transcoder */ |
1754 | /* FDI relies on the transcoder */ |
1755 | assert_fdi_tx_disabled(dev_priv, pipe); |
1755 | assert_fdi_tx_disabled(dev_priv, pipe); |
1756 | assert_fdi_rx_disabled(dev_priv, pipe); |
1756 | assert_fdi_rx_disabled(dev_priv, pipe); |
1757 | 1757 | ||
1758 | /* Ports must be off as well */ |
1758 | /* Ports must be off as well */ |
1759 | assert_pch_ports_disabled(dev_priv, pipe); |
1759 | assert_pch_ports_disabled(dev_priv, pipe); |
1760 | 1760 | ||
1761 | reg = TRANSCONF(pipe); |
1761 | reg = TRANSCONF(pipe); |
1762 | val = I915_READ(reg); |
1762 | val = I915_READ(reg); |
1763 | val &= ~TRANS_ENABLE; |
1763 | val &= ~TRANS_ENABLE; |
1764 | I915_WRITE(reg, val); |
1764 | I915_WRITE(reg, val); |
1765 | /* wait for PCH transcoder off, transcoder state */ |
1765 | /* wait for PCH transcoder off, transcoder state */ |
1766 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) |
1766 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) |
1767 | DRM_ERROR("failed to disable transcoder %d\n", pipe); |
1767 | DRM_ERROR("failed to disable transcoder %d\n", pipe); |
1768 | 1768 | ||
1769 | if (!HAS_PCH_IBX(dev)) { |
1769 | if (!HAS_PCH_IBX(dev)) { |
1770 | /* Workaround: Clear the timing override chicken bit again. */ |
1770 | /* Workaround: Clear the timing override chicken bit again. */ |
1771 | reg = TRANS_CHICKEN2(pipe); |
1771 | reg = TRANS_CHICKEN2(pipe); |
1772 | val = I915_READ(reg); |
1772 | val = I915_READ(reg); |
1773 | val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; |
1773 | val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; |
1774 | I915_WRITE(reg, val); |
1774 | I915_WRITE(reg, val); |
1775 | } |
1775 | } |
1776 | } |
1776 | } |
1777 | 1777 | ||
1778 | static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) |
1778 | static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) |
1779 | { |
1779 | { |
1780 | u32 val; |
1780 | u32 val; |
1781 | 1781 | ||
1782 | val = I915_READ(_TRANSACONF); |
1782 | val = I915_READ(_TRANSACONF); |
1783 | val &= ~TRANS_ENABLE; |
1783 | val &= ~TRANS_ENABLE; |
1784 | I915_WRITE(_TRANSACONF, val); |
1784 | I915_WRITE(_TRANSACONF, val); |
1785 | /* wait for PCH transcoder off, transcoder state */ |
1785 | /* wait for PCH transcoder off, transcoder state */ |
1786 | if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50)) |
1786 | if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50)) |
1787 | DRM_ERROR("Failed to disable PCH transcoder\n"); |
1787 | DRM_ERROR("Failed to disable PCH transcoder\n"); |
1788 | 1788 | ||
1789 | /* Workaround: clear timing override bit. */ |
1789 | /* Workaround: clear timing override bit. */ |
1790 | val = I915_READ(_TRANSA_CHICKEN2); |
1790 | val = I915_READ(_TRANSA_CHICKEN2); |
1791 | val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; |
1791 | val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; |
1792 | I915_WRITE(_TRANSA_CHICKEN2, val); |
1792 | I915_WRITE(_TRANSA_CHICKEN2, val); |
1793 | } |
1793 | } |
1794 | 1794 | ||
1795 | /** |
1795 | /** |
1796 | * intel_enable_pipe - enable a pipe, asserting requirements |
1796 | * intel_enable_pipe - enable a pipe, asserting requirements |
1797 | * @dev_priv: i915 private structure |
1797 | * @dev_priv: i915 private structure |
1798 | * @pipe: pipe to enable |
1798 | * @pipe: pipe to enable |
1799 | * @pch_port: on ILK+, is this pipe driving a PCH port or not |
1799 | * @pch_port: on ILK+, is this pipe driving a PCH port or not |
1800 | * |
1800 | * |
1801 | * Enable @pipe, making sure that various hardware specific requirements |
1801 | * Enable @pipe, making sure that various hardware specific requirements |
1802 | * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. |
1802 | * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. |
1803 | * |
1803 | * |
1804 | * @pipe should be %PIPE_A or %PIPE_B. |
1804 | * @pipe should be %PIPE_A or %PIPE_B. |
1805 | * |
1805 | * |
1806 | * Will wait until the pipe is actually running (i.e. first vblank) before |
1806 | * Will wait until the pipe is actually running (i.e. first vblank) before |
1807 | * returning. |
1807 | * returning. |
1808 | */ |
1808 | */ |
1809 | static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, |
1809 | static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, |
1810 | bool pch_port) |
1810 | bool pch_port) |
1811 | { |
1811 | { |
1812 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
1812 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
1813 | pipe); |
1813 | pipe); |
1814 | enum pipe pch_transcoder; |
1814 | enum pipe pch_transcoder; |
1815 | int reg; |
1815 | int reg; |
1816 | u32 val; |
1816 | u32 val; |
1817 | 1817 | ||
1818 | if (HAS_PCH_LPT(dev_priv->dev)) |
1818 | if (HAS_PCH_LPT(dev_priv->dev)) |
1819 | pch_transcoder = TRANSCODER_A; |
1819 | pch_transcoder = TRANSCODER_A; |
1820 | else |
1820 | else |
1821 | pch_transcoder = pipe; |
1821 | pch_transcoder = pipe; |
1822 | 1822 | ||
1823 | /* |
1823 | /* |
1824 | * A pipe without a PLL won't actually be able to drive bits from |
1824 | * A pipe without a PLL won't actually be able to drive bits from |
1825 | * a plane. On ILK+ the pipe PLLs are integrated, so we don't |
1825 | * a plane. On ILK+ the pipe PLLs are integrated, so we don't |
1826 | * need the check. |
1826 | * need the check. |
1827 | */ |
1827 | */ |
1828 | if (!HAS_PCH_SPLIT(dev_priv->dev)) |
1828 | if (!HAS_PCH_SPLIT(dev_priv->dev)) |
1829 | assert_pll_enabled(dev_priv, pipe); |
1829 | assert_pll_enabled(dev_priv, pipe); |
1830 | else { |
1830 | else { |
1831 | if (pch_port) { |
1831 | if (pch_port) { |
1832 | /* if driving the PCH, we need FDI enabled */ |
1832 | /* if driving the PCH, we need FDI enabled */ |
1833 | assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); |
1833 | assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); |
1834 | assert_fdi_tx_pll_enabled(dev_priv, |
1834 | assert_fdi_tx_pll_enabled(dev_priv, |
1835 | (enum pipe) cpu_transcoder); |
1835 | (enum pipe) cpu_transcoder); |
1836 | } |
1836 | } |
1837 | /* FIXME: assert CPU port conditions for SNB+ */ |
1837 | /* FIXME: assert CPU port conditions for SNB+ */ |
1838 | } |
1838 | } |
1839 | 1839 | ||
1840 | reg = PIPECONF(cpu_transcoder); |
1840 | reg = PIPECONF(cpu_transcoder); |
1841 | val = I915_READ(reg); |
1841 | val = I915_READ(reg); |
1842 | if (val & PIPECONF_ENABLE) |
1842 | if (val & PIPECONF_ENABLE) |
1843 | return; |
1843 | return; |
1844 | 1844 | ||
1845 | I915_WRITE(reg, val | PIPECONF_ENABLE); |
1845 | I915_WRITE(reg, val | PIPECONF_ENABLE); |
1846 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1846 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1847 | } |
1847 | } |
1848 | 1848 | ||
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	/* Resolve which CPU transcoder register bank drives this pipe;
	 * PIPECONF() below is indexed by transcoder, not pipe. */
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;		/* already disabled, nothing to do */

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	/* The pipe shuts down asynchronously; block until it is really off. */
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
1887 | 1887 | ||
1888 | /* |
1888 | /* |
1889 | * Plane regs are double buffered, going from enabled->disabled needs a |
1889 | * Plane regs are double buffered, going from enabled->disabled needs a |
1890 | * trigger in order to latch. The display address reg provides this. |
1890 | * trigger in order to latch. The display address reg provides this. |
1891 | */ |
1891 | */ |
1892 | void intel_flush_display_plane(struct drm_i915_private *dev_priv, |
1892 | void intel_flush_display_plane(struct drm_i915_private *dev_priv, |
1893 | enum plane plane) |
1893 | enum plane plane) |
1894 | { |
1894 | { |
1895 | if (dev_priv->info->gen >= 4) |
1895 | if (dev_priv->info->gen >= 4) |
1896 | I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); |
1896 | I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); |
1897 | else |
1897 | else |
1898 | I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); |
1898 | I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); |
1899 | } |
1899 | } |
1900 | 1900 | ||
/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if (val & DISPLAY_PLANE_ENABLE)
		return;		/* already enabled */

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	/* Plane regs are double buffered: the flush latches the new value,
	 * and waiting a vblank lets the enable actually take effect. */
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1927 | 1927 | ||
/**
 * intel_disable_plane - disable a display plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum pipe pipe)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;		/* already disabled */

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	/* Same double-buffer dance as the enable path: latch, then wait a
	 * vblank so the plane has truly stopped fetching. */
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1951 | 1951 | ||
/**
 * intel_pin_and_fence_fb_obj - pin a framebuffer object for scan-out
 * @dev: DRM device
 * @obj: GEM object backing the framebuffer
 * @pipelined: ring to serialize the pin against, or NULL
 *
 * Pins @obj into the display plane with the GTT alignment its tiling mode
 * requires and installs a fence register.  mm.interruptible is cleared for
 * the duration so the pin/fence calls cannot be interrupted by signals,
 * and restored on every exit path.
 *
 * Returns 0 on success; -EINVAL for Y-tiled objects; otherwise the error
 * from the pin or fence call (with the pin undone via the goto cleanup).
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	/* Scan-out alignment depends on tiling mode and hardware generation. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Make the pin/fence sequence non-interruptible; restored below. */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
2007 | 2007 | ||
/*
 * Intentionally a no-op in this port: the upstream unpin/unfence calls are
 * commented out, so a scan-out object pinned by intel_pin_and_fence_fb_obj()
 * is never released here.  NOTE(review): the corresponding pin call in
 * intel_pipe_set_base() is also commented out — confirm pins and unpins
 * stay balanced if either side is re-enabled.
 */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
//   i915_gem_object_unpin_fence(obj);
//   i915_gem_object_unpin(obj);
}
2013 | 2013 | ||
2014 | /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel |
2014 | /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel |
2015 | * is assumed to be a power-of-two. */ |
2015 | * is assumed to be a power-of-two. */ |
2016 | unsigned long intel_gen4_compute_page_offset(int *x, int *y, |
2016 | unsigned long intel_gen4_compute_page_offset(int *x, int *y, |
2017 | unsigned int tiling_mode, |
2017 | unsigned int tiling_mode, |
2018 | unsigned int cpp, |
2018 | unsigned int cpp, |
2019 | unsigned int pitch) |
2019 | unsigned int pitch) |
2020 | { |
2020 | { |
2021 | if (tiling_mode != I915_TILING_NONE) { |
2021 | if (tiling_mode != I915_TILING_NONE) { |
2022 | unsigned int tile_rows, tiles; |
2022 | unsigned int tile_rows, tiles; |
2023 | 2023 | ||
2024 | tile_rows = *y / 8; |
2024 | tile_rows = *y / 8; |
2025 | *y %= 8; |
2025 | *y %= 8; |
2026 | 2026 | ||
2027 | tiles = *x / (512/cpp); |
2027 | tiles = *x / (512/cpp); |
2028 | *x %= 512/cpp; |
2028 | *x %= 512/cpp; |
2029 | 2029 | ||
2030 | return tile_rows * pitch * 8 + tiles * 4096; |
2030 | return tile_rows * pitch * 8 + tiles * 4096; |
2031 | } else { |
2031 | } else { |
2032 | unsigned int offset; |
2032 | unsigned int offset; |
2033 | 2033 | ||
2034 | offset = *y * pitch + *x * cpp; |
2034 | offset = *y * pitch + *x * cpp; |
2035 | *y = 0; |
2035 | *y = 0; |
2036 | *x = (offset & 4095) / cpp; |
2036 | *x = (offset & 4095) / cpp; |
2037 | return offset & -4096; |
2037 | return offset & -4096; |
2038 | } |
2038 | } |
2039 | } |
2039 | } |
2040 | 2040 | ||
/*
 * i9xx_update_plane - program the primary plane registers (pre-ILK path)
 * @crtc: CRTC owning the plane
 * @fb: framebuffer to scan out
 * @x: horizontal panning offset into @fb
 * @y: vertical panning offset into @fb
 *
 * Translates the fb's DRM fourcc into DISPPLANE format bits, programs
 * tiling (gen4+ only), stride and the base/offset registers.
 * Returns 0 on success, -EINVAL for an unsupported plane index or
 * pixel format.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	/* This path only knows planes A and B. */
	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
		return -EINVAL;
	}

	/* Tiling bit only exists on gen4+ in this path. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ takes a page-aligned surface base plus a tile/linear
		 * sub-offset; split (x, y) accordingly. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       fb->bits_per_pixel / 8,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_MODIFY_DISPBASE(DSPSURF(plane),
				     obj->gtt_offset + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
	POSTING_READ(reg);

	return 0;
}
2137 | 2137 | ||
/*
 * ironlake_update_plane - program the primary plane registers (ILK+ path)
 * @crtc: CRTC owning the plane
 * @fb: framebuffer to scan out
 * @x: horizontal panning offset into @fb
 * @y: vertical panning offset into @fb
 *
 * Like i9xx_update_plane() but for newer hardware: supports a third plane,
 * always programs the tiling bit, forcibly disables trickle feed, and on
 * Haswell uses the combined DSPOFFSET register instead of
 * DSPTILEOFF/DSPLINOFF.  Returns 0 on success or -EINVAL.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg;

	/* Planes A, B and C are valid here. */
	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* must disable */
	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
	/* Split the panning offset into a page-aligned surface base plus a
	 * sub-offset, as on gen4+. */
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       fb->bits_per_pixel / 8,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;

	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_MODIFY_DISPBASE(DSPSURF(plane),
			     obj->gtt_offset + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev)) {
		/* HSW folds the tile x/y offset into a single register. */
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);

	return 0;
}
2228 | 2228 | ||
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Turn FBC off before moving the scanout base; presumably so the
	 * compressor never tracks a stale buffer — confirm against the
	 * per-platform disable_fbc implementations. */
	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);
	intel_increase_pllclock(crtc);

	/* @state is unused here; the per-generation hook does the work. */
	return dev_priv->display.update_plane(crtc, fb, x, y);
}
2243 | 2243 | ||
#if 0
/*
 * intel_finish_fb - wait for outstanding GPU rendering on an old scanout
 * @old_fb: framebuffer being replaced
 *
 * Currently compiled out in this port (its caller in intel_pipe_set_base()
 * is commented out as well).  Runs the wait non-interruptibly and restores
 * the previous mm.interruptible setting before returning.
 */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;

	return ret;
}
#endif
2268 | 2268 | ||
/*
 * intel_pipe_set_base - point a CRTC at a (new) framebuffer
 * @crtc: CRTC to update
 * @x: horizontal panning offset
 * @y: vertical panning offset
 * @fb: framebuffer to scan out
 *
 * Programs the plane base for @crtc, swaps crtc->fb, and refreshes FBC
 * state.  The upstream pin/fence and GPU-flush steps are commented out in
 * this port; intel_unpin_fb_obj() is correspondingly a no-op.
 *
 * Returns 0 on success (including the no-fb case) or a negative error.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_framebuffer *old_fb;
	int ret;

	/* no fb bound */
	if (!fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	/* NOTE(review): planes are 0-based, so `>` lets plane == num_pipe
	 * slip through — looks like it should be `>=`; matches the historic
	 * upstream check, confirm before changing. */
	if(intel_crtc->plane > dev_priv->num_pipe) {
		DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
			  intel_crtc->plane,
			  dev_priv->num_pipe);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
//   ret = intel_pin_and_fence_fb_obj(dev,
//                    to_intel_framebuffer(fb)->obj,
//                    NULL);
//   if (ret != 0) {
//       mutex_unlock(&dev->struct_mutex);
//       DRM_ERROR("pin & fence failed\n");
//       return ret;
//   }

//   if (crtc->fb)
//       intel_finish_fb(crtc->fb);

	ret = dev_priv->display.update_plane(crtc, fb, x, y);
	if (ret) {
		/* Harmless while unpin is stubbed; balances the (disabled)
		 * pin above if it is ever re-enabled. */
		intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	old_fb = crtc->fb;
	crtc->fb = fb;
	crtc->x = x;
	crtc->y = y;

	if (old_fb) {
		/* Wait a vblank so scanout has moved off the old fb before
		 * (notionally) releasing it. */
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
	}

	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
2328 | 2328 | ||
/*
 * intel_fdi_normal_train - switch the FDI link out of training patterns
 * @crtc: CRTC whose FDI link has completed training
 *
 * Programs both the TX and RX sides of the FDI link for normal pixel
 * transmission with enhanced framing, waits one idle-pattern time, and on
 * Ivybridge additionally enables error correction.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* Clear-then-set of the same mask looks redundant, but
		 * presumably the mask covers a multi-bit pattern field whose
		 * other values must be cleared first — confirm against the
		 * register definition. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
2369 | 2369 | ||
2370 | static void ivb_modeset_global_resources(struct drm_device *dev) |
2370 | static void ivb_modeset_global_resources(struct drm_device *dev) |
2371 | { |
2371 | { |
2372 | struct drm_i915_private *dev_priv = dev->dev_private; |
2372 | struct drm_i915_private *dev_priv = dev->dev_private; |
2373 | struct intel_crtc *pipe_B_crtc = |
2373 | struct intel_crtc *pipe_B_crtc = |
2374 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); |
2374 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); |
2375 | struct intel_crtc *pipe_C_crtc = |
2375 | struct intel_crtc *pipe_C_crtc = |
2376 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); |
2376 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); |
2377 | uint32_t temp; |
2377 | uint32_t temp; |
2378 | 2378 | ||
2379 | /* When everything is off disable fdi C so that we could enable fdi B |
2379 | /* When everything is off disable fdi C so that we could enable fdi B |
2380 | * with all lanes. XXX: This misses the case where a pipe is not using |
2380 | * with all lanes. XXX: This misses the case where a pipe is not using |
2381 | * any pch resources and so doesn't need any fdi lanes. */ |
2381 | * any pch resources and so doesn't need any fdi lanes. */ |
2382 | if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) { |
2382 | if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) { |
2383 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); |
2383 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); |
2384 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); |
2384 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); |
2385 | 2385 | ||
2386 | temp = I915_READ(SOUTH_CHICKEN1); |
2386 | temp = I915_READ(SOUTH_CHICKEN1); |
2387 | temp &= ~FDI_BC_BIFURCATION_SELECT; |
2387 | temp &= ~FDI_BC_BIFURCATION_SELECT; |
2388 | DRM_DEBUG_KMS("disabling fdi C rx\n"); |
2388 | DRM_DEBUG_KMS("disabling fdi C rx\n"); |
2389 | I915_WRITE(SOUTH_CHICKEN1, temp); |
2389 | I915_WRITE(SOUTH_CHICKEN1, temp); |
2390 | } |
2390 | } |
2391 | } |
2391 | } |
2392 | 2392 | ||
2393 | /* The FDI link training functions for ILK/Ibexpeak. */ |
2393 | /* The FDI link training functions for ILK/Ibexpeak. */ |
2394 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
2394 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
2395 | { |
2395 | { |
2396 | struct drm_device *dev = crtc->dev; |
2396 | struct drm_device *dev = crtc->dev; |
2397 | struct drm_i915_private *dev_priv = dev->dev_private; |
2397 | struct drm_i915_private *dev_priv = dev->dev_private; |
2398 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2398 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2399 | int pipe = intel_crtc->pipe; |
2399 | int pipe = intel_crtc->pipe; |
2400 | int plane = intel_crtc->plane; |
2400 | int plane = intel_crtc->plane; |
2401 | u32 reg, temp, tries; |
2401 | u32 reg, temp, tries; |
2402 | 2402 | ||
2403 | /* FDI needs bits from pipe & plane first */ |
2403 | /* FDI needs bits from pipe & plane first */ |
2404 | assert_pipe_enabled(dev_priv, pipe); |
2404 | assert_pipe_enabled(dev_priv, pipe); |
2405 | assert_plane_enabled(dev_priv, plane); |
2405 | assert_plane_enabled(dev_priv, plane); |
2406 | 2406 | ||
2407 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2407 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2408 | for train result */ |
2408 | for train result */ |
2409 | reg = FDI_RX_IMR(pipe); |
2409 | reg = FDI_RX_IMR(pipe); |
2410 | temp = I915_READ(reg); |
2410 | temp = I915_READ(reg); |
2411 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2411 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2412 | temp &= ~FDI_RX_BIT_LOCK; |
2412 | temp &= ~FDI_RX_BIT_LOCK; |
2413 | I915_WRITE(reg, temp); |
2413 | I915_WRITE(reg, temp); |
2414 | I915_READ(reg); |
2414 | I915_READ(reg); |
2415 | udelay(150); |
2415 | udelay(150); |
2416 | 2416 | ||
2417 | /* enable CPU FDI TX and PCH FDI RX */ |
2417 | /* enable CPU FDI TX and PCH FDI RX */ |
2418 | reg = FDI_TX_CTL(pipe); |
2418 | reg = FDI_TX_CTL(pipe); |
2419 | temp = I915_READ(reg); |
2419 | temp = I915_READ(reg); |
2420 | temp &= ~(7 << 19); |
2420 | temp &= ~(7 << 19); |
2421 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2421 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2422 | temp &= ~FDI_LINK_TRAIN_NONE; |
2422 | temp &= ~FDI_LINK_TRAIN_NONE; |
2423 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2423 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2424 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2424 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2425 | 2425 | ||
2426 | reg = FDI_RX_CTL(pipe); |
2426 | reg = FDI_RX_CTL(pipe); |
2427 | temp = I915_READ(reg); |
2427 | temp = I915_READ(reg); |
2428 | temp &= ~FDI_LINK_TRAIN_NONE; |
2428 | temp &= ~FDI_LINK_TRAIN_NONE; |
2429 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2429 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2430 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2430 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2431 | 2431 | ||
2432 | POSTING_READ(reg); |
2432 | POSTING_READ(reg); |
2433 | udelay(150); |
2433 | udelay(150); |
2434 | 2434 | ||
2435 | /* Ironlake workaround, enable clock pointer after FDI enable*/ |
2435 | /* Ironlake workaround, enable clock pointer after FDI enable*/ |
2436 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); |
2436 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); |
2437 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | |
2437 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | |
2438 | FDI_RX_PHASE_SYNC_POINTER_EN); |
2438 | FDI_RX_PHASE_SYNC_POINTER_EN); |
2439 | 2439 | ||
2440 | reg = FDI_RX_IIR(pipe); |
2440 | reg = FDI_RX_IIR(pipe); |
2441 | for (tries = 0; tries < 5; tries++) { |
2441 | for (tries = 0; tries < 5; tries++) { |
2442 | temp = I915_READ(reg); |
2442 | temp = I915_READ(reg); |
2443 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2443 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2444 | 2444 | ||
2445 | if ((temp & FDI_RX_BIT_LOCK)) { |
2445 | if ((temp & FDI_RX_BIT_LOCK)) { |
2446 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
2446 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
2447 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2447 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2448 | break; |
2448 | break; |
2449 | } |
2449 | } |
2450 | } |
2450 | } |
2451 | if (tries == 5) |
2451 | if (tries == 5) |
2452 | DRM_ERROR("FDI train 1 fail!\n"); |
2452 | DRM_ERROR("FDI train 1 fail!\n"); |
2453 | 2453 | ||
2454 | /* Train 2 */ |
2454 | /* Train 2 */ |
2455 | reg = FDI_TX_CTL(pipe); |
2455 | reg = FDI_TX_CTL(pipe); |
2456 | temp = I915_READ(reg); |
2456 | temp = I915_READ(reg); |
2457 | temp &= ~FDI_LINK_TRAIN_NONE; |
2457 | temp &= ~FDI_LINK_TRAIN_NONE; |
2458 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2458 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2459 | I915_WRITE(reg, temp); |
2459 | I915_WRITE(reg, temp); |
2460 | 2460 | ||
2461 | reg = FDI_RX_CTL(pipe); |
2461 | reg = FDI_RX_CTL(pipe); |
2462 | temp = I915_READ(reg); |
2462 | temp = I915_READ(reg); |
2463 | temp &= ~FDI_LINK_TRAIN_NONE; |
2463 | temp &= ~FDI_LINK_TRAIN_NONE; |
2464 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2464 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2465 | I915_WRITE(reg, temp); |
2465 | I915_WRITE(reg, temp); |
2466 | 2466 | ||
2467 | POSTING_READ(reg); |
2467 | POSTING_READ(reg); |
2468 | udelay(150); |
2468 | udelay(150); |
2469 | 2469 | ||
2470 | reg = FDI_RX_IIR(pipe); |
2470 | reg = FDI_RX_IIR(pipe); |
2471 | for (tries = 0; tries < 5; tries++) { |
2471 | for (tries = 0; tries < 5; tries++) { |
2472 | temp = I915_READ(reg); |
2472 | temp = I915_READ(reg); |
2473 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2473 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2474 | 2474 | ||
2475 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2475 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2476 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2476 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2477 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2477 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2478 | break; |
2478 | break; |
2479 | } |
2479 | } |
2480 | } |
2480 | } |
2481 | if (tries == 5) |
2481 | if (tries == 5) |
2482 | DRM_ERROR("FDI train 2 fail!\n"); |
2482 | DRM_ERROR("FDI train 2 fail!\n"); |
2483 | 2483 | ||
2484 | DRM_DEBUG_KMS("FDI train done\n"); |
2484 | DRM_DEBUG_KMS("FDI train done\n"); |
2485 | 2485 | ||
2486 | } |
2486 | } |
2487 | 2487 | ||
2488 | static const int snb_b_fdi_train_param[] = { |
2488 | static const int snb_b_fdi_train_param[] = { |
2489 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, |
2489 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, |
2490 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, |
2490 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, |
2491 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, |
2491 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, |
2492 | FDI_LINK_TRAIN_800MV_0DB_SNB_B, |
2492 | FDI_LINK_TRAIN_800MV_0DB_SNB_B, |
2493 | }; |
2493 | }; |
2494 | 2494 | ||
2495 | /* The FDI link training functions for SNB/Cougarpoint. */ |
2495 | /* The FDI link training functions for SNB/Cougarpoint. */ |
2496 | static void gen6_fdi_link_train(struct drm_crtc *crtc) |
2496 | static void gen6_fdi_link_train(struct drm_crtc *crtc) |
2497 | { |
2497 | { |
2498 | struct drm_device *dev = crtc->dev; |
2498 | struct drm_device *dev = crtc->dev; |
2499 | struct drm_i915_private *dev_priv = dev->dev_private; |
2499 | struct drm_i915_private *dev_priv = dev->dev_private; |
2500 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2500 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2501 | int pipe = intel_crtc->pipe; |
2501 | int pipe = intel_crtc->pipe; |
2502 | u32 reg, temp, i, retry; |
2502 | u32 reg, temp, i, retry; |
2503 | 2503 | ||
2504 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2504 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2505 | for train result */ |
2505 | for train result */ |
2506 | reg = FDI_RX_IMR(pipe); |
2506 | reg = FDI_RX_IMR(pipe); |
2507 | temp = I915_READ(reg); |
2507 | temp = I915_READ(reg); |
2508 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2508 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2509 | temp &= ~FDI_RX_BIT_LOCK; |
2509 | temp &= ~FDI_RX_BIT_LOCK; |
2510 | I915_WRITE(reg, temp); |
2510 | I915_WRITE(reg, temp); |
2511 | 2511 | ||
2512 | POSTING_READ(reg); |
2512 | POSTING_READ(reg); |
2513 | udelay(150); |
2513 | udelay(150); |
2514 | 2514 | ||
2515 | /* enable CPU FDI TX and PCH FDI RX */ |
2515 | /* enable CPU FDI TX and PCH FDI RX */ |
2516 | reg = FDI_TX_CTL(pipe); |
2516 | reg = FDI_TX_CTL(pipe); |
2517 | temp = I915_READ(reg); |
2517 | temp = I915_READ(reg); |
2518 | temp &= ~(7 << 19); |
2518 | temp &= ~(7 << 19); |
2519 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2519 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2520 | temp &= ~FDI_LINK_TRAIN_NONE; |
2520 | temp &= ~FDI_LINK_TRAIN_NONE; |
2521 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2521 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2522 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2522 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2523 | /* SNB-B */ |
2523 | /* SNB-B */ |
2524 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2524 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2525 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2525 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2526 | 2526 | ||
2527 | I915_WRITE(FDI_RX_MISC(pipe), |
2527 | I915_WRITE(FDI_RX_MISC(pipe), |
2528 | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); |
2528 | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); |
2529 | 2529 | ||
2530 | reg = FDI_RX_CTL(pipe); |
2530 | reg = FDI_RX_CTL(pipe); |
2531 | temp = I915_READ(reg); |
2531 | temp = I915_READ(reg); |
2532 | if (HAS_PCH_CPT(dev)) { |
2532 | if (HAS_PCH_CPT(dev)) { |
2533 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2533 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2534 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2534 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2535 | } else { |
2535 | } else { |
2536 | temp &= ~FDI_LINK_TRAIN_NONE; |
2536 | temp &= ~FDI_LINK_TRAIN_NONE; |
2537 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2537 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2538 | } |
2538 | } |
2539 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2539 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2540 | 2540 | ||
2541 | POSTING_READ(reg); |
2541 | POSTING_READ(reg); |
2542 | udelay(150); |
2542 | udelay(150); |
2543 | 2543 | ||
2544 | for (i = 0; i < 4; i++) { |
2544 | for (i = 0; i < 4; i++) { |
2545 | reg = FDI_TX_CTL(pipe); |
2545 | reg = FDI_TX_CTL(pipe); |
2546 | temp = I915_READ(reg); |
2546 | temp = I915_READ(reg); |
2547 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2547 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2548 | temp |= snb_b_fdi_train_param[i]; |
2548 | temp |= snb_b_fdi_train_param[i]; |
2549 | I915_WRITE(reg, temp); |
2549 | I915_WRITE(reg, temp); |
2550 | 2550 | ||
2551 | POSTING_READ(reg); |
2551 | POSTING_READ(reg); |
2552 | udelay(500); |
2552 | udelay(500); |
2553 | 2553 | ||
2554 | for (retry = 0; retry < 5; retry++) { |
2554 | for (retry = 0; retry < 5; retry++) { |
2555 | reg = FDI_RX_IIR(pipe); |
2555 | reg = FDI_RX_IIR(pipe); |
2556 | temp = I915_READ(reg); |
2556 | temp = I915_READ(reg); |
2557 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2557 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2558 | if (temp & FDI_RX_BIT_LOCK) { |
2558 | if (temp & FDI_RX_BIT_LOCK) { |
2559 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2559 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2560 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
2560 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
2561 | break; |
2561 | break; |
2562 | } |
2562 | } |
2563 | udelay(50); |
2563 | udelay(50); |
2564 | } |
2564 | } |
2565 | if (retry < 5) |
2565 | if (retry < 5) |
2566 | break; |
2566 | break; |
2567 | } |
2567 | } |
2568 | if (i == 4) |
2568 | if (i == 4) |
2569 | DRM_ERROR("FDI train 1 fail!\n"); |
2569 | DRM_ERROR("FDI train 1 fail!\n"); |
2570 | 2570 | ||
2571 | /* Train 2 */ |
2571 | /* Train 2 */ |
2572 | reg = FDI_TX_CTL(pipe); |
2572 | reg = FDI_TX_CTL(pipe); |
2573 | temp = I915_READ(reg); |
2573 | temp = I915_READ(reg); |
2574 | temp &= ~FDI_LINK_TRAIN_NONE; |
2574 | temp &= ~FDI_LINK_TRAIN_NONE; |
2575 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2575 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2576 | if (IS_GEN6(dev)) { |
2576 | if (IS_GEN6(dev)) { |
2577 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2577 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2578 | /* SNB-B */ |
2578 | /* SNB-B */ |
2579 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2579 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2580 | } |
2580 | } |
2581 | I915_WRITE(reg, temp); |
2581 | I915_WRITE(reg, temp); |
2582 | 2582 | ||
2583 | reg = FDI_RX_CTL(pipe); |
2583 | reg = FDI_RX_CTL(pipe); |
2584 | temp = I915_READ(reg); |
2584 | temp = I915_READ(reg); |
2585 | if (HAS_PCH_CPT(dev)) { |
2585 | if (HAS_PCH_CPT(dev)) { |
2586 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2586 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2587 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
2587 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
2588 | } else { |
2588 | } else { |
2589 | temp &= ~FDI_LINK_TRAIN_NONE; |
2589 | temp &= ~FDI_LINK_TRAIN_NONE; |
2590 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2590 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2591 | } |
2591 | } |
2592 | I915_WRITE(reg, temp); |
2592 | I915_WRITE(reg, temp); |
2593 | 2593 | ||
2594 | POSTING_READ(reg); |
2594 | POSTING_READ(reg); |
2595 | udelay(150); |
2595 | udelay(150); |
2596 | 2596 | ||
2597 | for (i = 0; i < 4; i++) { |
2597 | for (i = 0; i < 4; i++) { |
2598 | reg = FDI_TX_CTL(pipe); |
2598 | reg = FDI_TX_CTL(pipe); |
2599 | temp = I915_READ(reg); |
2599 | temp = I915_READ(reg); |
2600 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2600 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2601 | temp |= snb_b_fdi_train_param[i]; |
2601 | temp |= snb_b_fdi_train_param[i]; |
2602 | I915_WRITE(reg, temp); |
2602 | I915_WRITE(reg, temp); |
2603 | 2603 | ||
2604 | POSTING_READ(reg); |
2604 | POSTING_READ(reg); |
2605 | udelay(500); |
2605 | udelay(500); |
2606 | 2606 | ||
2607 | for (retry = 0; retry < 5; retry++) { |
2607 | for (retry = 0; retry < 5; retry++) { |
2608 | reg = FDI_RX_IIR(pipe); |
2608 | reg = FDI_RX_IIR(pipe); |
2609 | temp = I915_READ(reg); |
2609 | temp = I915_READ(reg); |
2610 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2610 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2611 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2611 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2612 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2612 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2613 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2613 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2614 | break; |
2614 | break; |
2615 | } |
2615 | } |
2616 | udelay(50); |
2616 | udelay(50); |
2617 | } |
2617 | } |
2618 | if (retry < 5) |
2618 | if (retry < 5) |
2619 | break; |
2619 | break; |
2620 | } |
2620 | } |
2621 | if (i == 4) |
2621 | if (i == 4) |
2622 | DRM_ERROR("FDI train 2 fail!\n"); |
2622 | DRM_ERROR("FDI train 2 fail!\n"); |
2623 | 2623 | ||
2624 | DRM_DEBUG_KMS("FDI train done.\n"); |
2624 | DRM_DEBUG_KMS("FDI train done.\n"); |
2625 | } |
2625 | } |
2626 | 2626 | ||
2627 | /* Manual link training for Ivy Bridge A0 parts */ |
2627 | /* Manual link training for Ivy Bridge A0 parts */ |
2628 | static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) |
2628 | static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) |
2629 | { |
2629 | { |
2630 | struct drm_device *dev = crtc->dev; |
2630 | struct drm_device *dev = crtc->dev; |
2631 | struct drm_i915_private *dev_priv = dev->dev_private; |
2631 | struct drm_i915_private *dev_priv = dev->dev_private; |
2632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2632 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2633 | int pipe = intel_crtc->pipe; |
2633 | int pipe = intel_crtc->pipe; |
2634 | u32 reg, temp, i; |
2634 | u32 reg, temp, i; |
2635 | 2635 | ||
2636 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2636 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2637 | for train result */ |
2637 | for train result */ |
2638 | reg = FDI_RX_IMR(pipe); |
2638 | reg = FDI_RX_IMR(pipe); |
2639 | temp = I915_READ(reg); |
2639 | temp = I915_READ(reg); |
2640 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2640 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2641 | temp &= ~FDI_RX_BIT_LOCK; |
2641 | temp &= ~FDI_RX_BIT_LOCK; |
2642 | I915_WRITE(reg, temp); |
2642 | I915_WRITE(reg, temp); |
2643 | 2643 | ||
2644 | POSTING_READ(reg); |
2644 | POSTING_READ(reg); |
2645 | udelay(150); |
2645 | udelay(150); |
2646 | 2646 | ||
2647 | DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", |
2647 | DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", |
2648 | I915_READ(FDI_RX_IIR(pipe))); |
2648 | I915_READ(FDI_RX_IIR(pipe))); |
2649 | 2649 | ||
2650 | /* enable CPU FDI TX and PCH FDI RX */ |
2650 | /* enable CPU FDI TX and PCH FDI RX */ |
2651 | reg = FDI_TX_CTL(pipe); |
2651 | reg = FDI_TX_CTL(pipe); |
2652 | temp = I915_READ(reg); |
2652 | temp = I915_READ(reg); |
2653 | temp &= ~(7 << 19); |
2653 | temp &= ~(7 << 19); |
2654 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2654 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2655 | temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); |
2655 | temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); |
2656 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; |
2656 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; |
2657 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2657 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2658 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2658 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2659 | temp |= FDI_COMPOSITE_SYNC; |
2659 | temp |= FDI_COMPOSITE_SYNC; |
2660 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2660 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2661 | 2661 | ||
2662 | I915_WRITE(FDI_RX_MISC(pipe), |
2662 | I915_WRITE(FDI_RX_MISC(pipe), |
2663 | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); |
2663 | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); |
2664 | 2664 | ||
2665 | reg = FDI_RX_CTL(pipe); |
2665 | reg = FDI_RX_CTL(pipe); |
2666 | temp = I915_READ(reg); |
2666 | temp = I915_READ(reg); |
2667 | temp &= ~FDI_LINK_TRAIN_AUTO; |
2667 | temp &= ~FDI_LINK_TRAIN_AUTO; |
2668 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2668 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2669 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2669 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2670 | temp |= FDI_COMPOSITE_SYNC; |
2670 | temp |= FDI_COMPOSITE_SYNC; |
2671 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2671 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2672 | 2672 | ||
2673 | POSTING_READ(reg); |
2673 | POSTING_READ(reg); |
2674 | udelay(150); |
2674 | udelay(150); |
2675 | 2675 | ||
2676 | for (i = 0; i < 4; i++) { |
2676 | for (i = 0; i < 4; i++) { |
2677 | reg = FDI_TX_CTL(pipe); |
2677 | reg = FDI_TX_CTL(pipe); |
2678 | temp = I915_READ(reg); |
2678 | temp = I915_READ(reg); |
2679 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2679 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2680 | temp |= snb_b_fdi_train_param[i]; |
2680 | temp |= snb_b_fdi_train_param[i]; |
2681 | I915_WRITE(reg, temp); |
2681 | I915_WRITE(reg, temp); |
2682 | 2682 | ||
2683 | POSTING_READ(reg); |
2683 | POSTING_READ(reg); |
2684 | udelay(500); |
2684 | udelay(500); |
2685 | 2685 | ||
2686 | reg = FDI_RX_IIR(pipe); |
2686 | reg = FDI_RX_IIR(pipe); |
2687 | temp = I915_READ(reg); |
2687 | temp = I915_READ(reg); |
2688 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2688 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2689 | 2689 | ||
2690 | if (temp & FDI_RX_BIT_LOCK || |
2690 | if (temp & FDI_RX_BIT_LOCK || |
2691 | (I915_READ(reg) & FDI_RX_BIT_LOCK)) { |
2691 | (I915_READ(reg) & FDI_RX_BIT_LOCK)) { |
2692 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2692 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2693 | DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i); |
2693 | DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i); |
2694 | break; |
2694 | break; |
2695 | } |
2695 | } |
2696 | } |
2696 | } |
2697 | if (i == 4) |
2697 | if (i == 4) |
2698 | DRM_ERROR("FDI train 1 fail!\n"); |
2698 | DRM_ERROR("FDI train 1 fail!\n"); |
2699 | 2699 | ||
2700 | /* Train 2 */ |
2700 | /* Train 2 */ |
2701 | reg = FDI_TX_CTL(pipe); |
2701 | reg = FDI_TX_CTL(pipe); |
2702 | temp = I915_READ(reg); |
2702 | temp = I915_READ(reg); |
2703 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; |
2703 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; |
2704 | temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; |
2704 | temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; |
2705 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2705 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2706 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2706 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2707 | I915_WRITE(reg, temp); |
2707 | I915_WRITE(reg, temp); |
2708 | 2708 | ||
2709 | reg = FDI_RX_CTL(pipe); |
2709 | reg = FDI_RX_CTL(pipe); |
2710 | temp = I915_READ(reg); |
2710 | temp = I915_READ(reg); |
2711 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2711 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2712 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
2712 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
2713 | I915_WRITE(reg, temp); |
2713 | I915_WRITE(reg, temp); |
2714 | 2714 | ||
2715 | POSTING_READ(reg); |
2715 | POSTING_READ(reg); |
2716 | udelay(150); |
2716 | udelay(150); |
2717 | 2717 | ||
2718 | for (i = 0; i < 4; i++) { |
2718 | for (i = 0; i < 4; i++) { |
2719 | reg = FDI_TX_CTL(pipe); |
2719 | reg = FDI_TX_CTL(pipe); |
2720 | temp = I915_READ(reg); |
2720 | temp = I915_READ(reg); |
2721 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2721 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2722 | temp |= snb_b_fdi_train_param[i]; |
2722 | temp |= snb_b_fdi_train_param[i]; |
2723 | I915_WRITE(reg, temp); |
2723 | I915_WRITE(reg, temp); |
2724 | 2724 | ||
2725 | POSTING_READ(reg); |
2725 | POSTING_READ(reg); |
2726 | udelay(500); |
2726 | udelay(500); |
2727 | 2727 | ||
2728 | reg = FDI_RX_IIR(pipe); |
2728 | reg = FDI_RX_IIR(pipe); |
2729 | temp = I915_READ(reg); |
2729 | temp = I915_READ(reg); |
2730 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2730 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2731 | 2731 | ||
2732 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2732 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2733 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2733 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2734 | DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); |
2734 | DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i); |
2735 | break; |
2735 | break; |
2736 | } |
2736 | } |
2737 | } |
2737 | } |
2738 | if (i == 4) |
2738 | if (i == 4) |
2739 | DRM_ERROR("FDI train 2 fail!\n"); |
2739 | DRM_ERROR("FDI train 2 fail!\n"); |
2740 | 2740 | ||
2741 | DRM_DEBUG_KMS("FDI train done.\n"); |
2741 | DRM_DEBUG_KMS("FDI train done.\n"); |
2742 | } |
2742 | } |
2743 | 2743 | ||
2744 | static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) |
2744 | static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) |
2745 | { |
2745 | { |
2746 | struct drm_device *dev = intel_crtc->base.dev; |
2746 | struct drm_device *dev = intel_crtc->base.dev; |
2747 | struct drm_i915_private *dev_priv = dev->dev_private; |
2747 | struct drm_i915_private *dev_priv = dev->dev_private; |
2748 | int pipe = intel_crtc->pipe; |
2748 | int pipe = intel_crtc->pipe; |
2749 | u32 reg, temp; |
2749 | u32 reg, temp; |
2750 | 2750 | ||
2751 | 2751 | ||
2752 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
2752 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
2753 | reg = FDI_RX_CTL(pipe); |
2753 | reg = FDI_RX_CTL(pipe); |
2754 | temp = I915_READ(reg); |
2754 | temp = I915_READ(reg); |
2755 | temp &= ~((0x7 << 19) | (0x7 << 16)); |
2755 | temp &= ~((0x7 << 19) | (0x7 << 16)); |
2756 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2756 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2757 | temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; |
2757 | temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; |
2758 | I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); |
2758 | I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); |
2759 | 2759 | ||
2760 | POSTING_READ(reg); |
2760 | POSTING_READ(reg); |
2761 | udelay(200); |
2761 | udelay(200); |
2762 | 2762 | ||
2763 | /* Switch from Rawclk to PCDclk */ |
2763 | /* Switch from Rawclk to PCDclk */ |
2764 | temp = I915_READ(reg); |
2764 | temp = I915_READ(reg); |
2765 | I915_WRITE(reg, temp | FDI_PCDCLK); |
2765 | I915_WRITE(reg, temp | FDI_PCDCLK); |
2766 | 2766 | ||
2767 | POSTING_READ(reg); |
2767 | POSTING_READ(reg); |
2768 | udelay(200); |
2768 | udelay(200); |
2769 | 2769 | ||
2770 | /* Enable CPU FDI TX PLL, always on for Ironlake */ |
2770 | /* Enable CPU FDI TX PLL, always on for Ironlake */ |
2771 | reg = FDI_TX_CTL(pipe); |
2771 | reg = FDI_TX_CTL(pipe); |
2772 | temp = I915_READ(reg); |
2772 | temp = I915_READ(reg); |
2773 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { |
2773 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { |
2774 | I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); |
2774 | I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); |
2775 | 2775 | ||
2776 | POSTING_READ(reg); |
2776 | POSTING_READ(reg); |
2777 | udelay(100); |
2777 | udelay(100); |
2778 | } |
2778 | } |
2779 | } |
2779 | } |
2780 | 2780 | ||
2781 | static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) |
2781 | static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) |
2782 | { |
2782 | { |
2783 | struct drm_device *dev = intel_crtc->base.dev; |
2783 | struct drm_device *dev = intel_crtc->base.dev; |
2784 | struct drm_i915_private *dev_priv = dev->dev_private; |
2784 | struct drm_i915_private *dev_priv = dev->dev_private; |
2785 | int pipe = intel_crtc->pipe; |
2785 | int pipe = intel_crtc->pipe; |
2786 | u32 reg, temp; |
2786 | u32 reg, temp; |
2787 | 2787 | ||
2788 | /* Switch from PCDclk to Rawclk */ |
2788 | /* Switch from PCDclk to Rawclk */ |
2789 | reg = FDI_RX_CTL(pipe); |
2789 | reg = FDI_RX_CTL(pipe); |
2790 | temp = I915_READ(reg); |
2790 | temp = I915_READ(reg); |
2791 | I915_WRITE(reg, temp & ~FDI_PCDCLK); |
2791 | I915_WRITE(reg, temp & ~FDI_PCDCLK); |
2792 | 2792 | ||
2793 | /* Disable CPU FDI TX PLL */ |
2793 | /* Disable CPU FDI TX PLL */ |
2794 | reg = FDI_TX_CTL(pipe); |
2794 | reg = FDI_TX_CTL(pipe); |
2795 | temp = I915_READ(reg); |
2795 | temp = I915_READ(reg); |
2796 | I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); |
2796 | I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); |
2797 | 2797 | ||
2798 | POSTING_READ(reg); |
2798 | POSTING_READ(reg); |
2799 | udelay(100); |
2799 | udelay(100); |
2800 | 2800 | ||
2801 | reg = FDI_RX_CTL(pipe); |
2801 | reg = FDI_RX_CTL(pipe); |
2802 | temp = I915_READ(reg); |
2802 | temp = I915_READ(reg); |
2803 | I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); |
2803 | I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); |
2804 | 2804 | ||
2805 | /* Wait for the clocks to turn off. */ |
2805 | /* Wait for the clocks to turn off. */ |
2806 | POSTING_READ(reg); |
2806 | POSTING_READ(reg); |
2807 | udelay(100); |
2807 | udelay(100); |
2808 | } |
2808 | } |
2809 | 2809 | ||
/* Disable the CPU-side FDI transmitter and PCH-side FDI receiver for
 * this pipe, then drop the link back into training pattern 1 so a
 * subsequent enable starts from a known state.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* clear the RX BPC field (bits 18:16), then copy the BPC value from
	 * PIPECONF (bits 7:5 shifted up by 11) so both sides agree */
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	}

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		/* CPT uses a different training-pattern field layout */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	/* flush the write and give the link time to settle */
	POSTING_READ(reg);
	udelay(100);
}
2862 | 2862 | ||
/* Return true if a page flip is still outstanding on this crtc.
 * A GPU reset in progress, or one completed since the flip was queued
 * (reset_counter mismatch), counts as "no pending flip" so waiters are
 * not stuck on a flip that will never retire.
 */
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool pending;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return false;

	/* unpin_work is protected by dev->event_lock */
	spin_lock_irqsave(&dev->event_lock, flags);
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return pending;
}
2881 | 2881 | ||
#if 0
/* Currently compiled out: wait for any queued page flip on this crtc to
 * complete, then retire outstanding rendering on the current framebuffer.
 */
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (crtc->fb == NULL)
		return;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	wait_event(dev_priv->pending_flip_queue,
		   !intel_crtc_has_pending_flip(crtc));

	/* intel_finish_fb requires struct_mutex */
	mutex_lock(&dev->struct_mutex);
	intel_finish_fb(crtc->fb);
	mutex_unlock(&dev->struct_mutex);
}
#endif
2901 | 2901 | ||
2902 | static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc) |
2902 | static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc) |
2903 | { |
2903 | { |
2904 | struct drm_device *dev = crtc->dev; |
2904 | struct drm_device *dev = crtc->dev; |
2905 | struct intel_encoder *intel_encoder; |
2905 | struct intel_encoder *intel_encoder; |
2906 | 2906 | ||
2907 | /* |
2907 | /* |
2908 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that |
2908 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that |
2909 | * must be driven by its own crtc; no sharing is possible. |
2909 | * must be driven by its own crtc; no sharing is possible. |
2910 | */ |
2910 | */ |
2911 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
2911 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
2912 | switch (intel_encoder->type) { |
2912 | switch (intel_encoder->type) { |
2913 | case INTEL_OUTPUT_EDP: |
2913 | case INTEL_OUTPUT_EDP: |
2914 | if (!intel_encoder_is_pch_edp(&intel_encoder->base)) |
2914 | if (!intel_encoder_is_pch_edp(&intel_encoder->base)) |
2915 | return false; |
2915 | return false; |
2916 | continue; |
2916 | continue; |
2917 | } |
2917 | } |
2918 | } |
2918 | } |
2919 | 2919 | ||
2920 | return true; |
2920 | return true; |
2921 | } |
2921 | } |
2922 | 2922 | ||
2923 | static bool haswell_crtc_driving_pch(struct drm_crtc *crtc) |
2923 | static bool haswell_crtc_driving_pch(struct drm_crtc *crtc) |
2924 | { |
2924 | { |
2925 | return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG); |
2925 | return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG); |
2926 | } |
2926 | } |
2927 | 2927 | ||
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Sideband (SBI) accesses are serialized by dpio_lock */
	mutex_lock(&dev_priv->dpio_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 * NOTE(review): this comment says "ungate first, gate back at the
	 * end" but the writes below use PIXCLK_GATE_GATE here and
	 * PIXCLK_GATE_UNGATE at the end — confirm against the register
	 * macro definitions.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (crtc->mode.clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the crtc->mode.clock is in KHz. To get the divisors,
		 * it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		/* split the divisor into an integer part and a 64-step
		 * phase-interpolator fraction */
		desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			crtc->mode.clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->dpio_lock);
}
3015 | 3015 | ||
/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs
 * - FDI training & RX/TX
 * - update transcoder timings
 * - DP transcoding bits
 * - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_transcoder_disabled(dev_priv, pipe);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
	 * unconditionally resets the pll - we need that to have the right LVDS
	 * enable sequence. */
	ironlake_enable_pch_pll(intel_crtc);

	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		/* On CPT, enable the per-transcoder DPLL and set the
		 * DPLL-B select bit to match the PLL this crtc holds. */
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		default:
		case 0:
			temp |= TRANSA_DPLL_ENABLE;
			sel = TRANSA_DPLLB_SEL;
			break;
		case 1:
			temp |= TRANSB_DPLL_ENABLE;
			sel = TRANSB_DPLLB_SEL;
			break;
		case 2:
			temp |= TRANSC_DPLL_ENABLE;
			sel = TRANSC_DPLLB_SEL;
			break;
		}
		if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));

	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
	I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));

	/* Training done: switch the FDI link to normal operation */
	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		/* extract the PIPECONF BPC field value (bits 7:5) */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* route the transcoder to the DP port on this crtc */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
3128 | 3128 | ||
/* LPT variant of the PCH enable sequence: program the iCLKIP clock,
 * mirror the CPU transcoder timings into the PCH transcoder, and
 * enable it.  LPT only exposes transcoder A on the PCH side.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;

	assert_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
3152 | 3152 | ||
3153 | static void intel_put_pch_pll(struct intel_crtc *intel_crtc) |
3153 | static void intel_put_pch_pll(struct intel_crtc *intel_crtc) |
3154 | { |
3154 | { |
3155 | struct intel_pch_pll *pll = intel_crtc->pch_pll; |
3155 | struct intel_pch_pll *pll = intel_crtc->pch_pll; |
3156 | 3156 | ||
3157 | if (pll == NULL) |
3157 | if (pll == NULL) |
3158 | return; |
3158 | return; |
3159 | 3159 | ||
3160 | if (pll->refcount == 0) { |
3160 | if (pll->refcount == 0) { |
3161 | WARN(1, "bad PCH PLL refcount\n"); |
3161 | WARN(1, "bad PCH PLL refcount\n"); |
3162 | return; |
3162 | return; |
3163 | } |
3163 | } |
3164 | 3164 | ||
3165 | --pll->refcount; |
3165 | --pll->refcount; |
3166 | intel_crtc->pch_pll = NULL; |
3166 | intel_crtc->pch_pll = NULL; |
3167 | } |
3167 | } |
3168 | 3168 | ||
3169 | static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp) |
3169 | static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp) |
3170 | { |
3170 | { |
3171 | struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; |
3171 | struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; |
3172 | struct intel_pch_pll *pll; |
3172 | struct intel_pch_pll *pll; |
3173 | int i; |
3173 | int i; |
3174 | 3174 | ||
3175 | pll = intel_crtc->pch_pll; |
3175 | pll = intel_crtc->pch_pll; |
3176 | if (pll) { |
3176 | if (pll) { |
3177 | DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n", |
3177 | DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n", |
3178 | intel_crtc->base.base.id, pll->pll_reg); |
3178 | intel_crtc->base.base.id, pll->pll_reg); |
3179 | goto prepare; |
3179 | goto prepare; |
3180 | } |
3180 | } |
3181 | 3181 | ||
3182 | if (HAS_PCH_IBX(dev_priv->dev)) { |
3182 | if (HAS_PCH_IBX(dev_priv->dev)) { |
3183 | /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ |
3183 | /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ |
3184 | i = intel_crtc->pipe; |
3184 | i = intel_crtc->pipe; |
3185 | pll = &dev_priv->pch_plls[i]; |
3185 | pll = &dev_priv->pch_plls[i]; |
3186 | 3186 | ||
3187 | DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n", |
3187 | DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n", |
3188 | intel_crtc->base.base.id, pll->pll_reg); |
3188 | intel_crtc->base.base.id, pll->pll_reg); |
3189 | 3189 | ||
3190 | goto found; |
3190 | goto found; |
3191 | } |
3191 | } |
3192 | 3192 | ||
3193 | for (i = 0; i < dev_priv->num_pch_pll; i++) { |
3193 | for (i = 0; i < dev_priv->num_pch_pll; i++) { |
3194 | pll = &dev_priv->pch_plls[i]; |
3194 | pll = &dev_priv->pch_plls[i]; |
3195 | 3195 | ||
3196 | /* Only want to check enabled timings first */ |
3196 | /* Only want to check enabled timings first */ |
3197 | if (pll->refcount == 0) |
3197 | if (pll->refcount == 0) |
3198 | continue; |
3198 | continue; |
3199 | 3199 | ||
3200 | if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) && |
3200 | if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) && |
3201 | fp == I915_READ(pll->fp0_reg)) { |
3201 | fp == I915_READ(pll->fp0_reg)) { |
3202 | DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n", |
3202 | DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n", |
3203 | intel_crtc->base.base.id, |
3203 | intel_crtc->base.base.id, |
3204 | pll->pll_reg, pll->refcount, pll->active); |
3204 | pll->pll_reg, pll->refcount, pll->active); |
3205 | 3205 | ||
3206 | goto found; |
3206 | goto found; |
3207 | } |
3207 | } |
3208 | } |
3208 | } |
3209 | 3209 | ||
3210 | /* Ok no matching timings, maybe there's a free one? */ |
3210 | /* Ok no matching timings, maybe there's a free one? */ |
3211 | for (i = 0; i < dev_priv->num_pch_pll; i++) { |
3211 | for (i = 0; i < dev_priv->num_pch_pll; i++) { |
3212 | pll = &dev_priv->pch_plls[i]; |
3212 | pll = &dev_priv->pch_plls[i]; |
3213 | if (pll->refcount == 0) { |
3213 | if (pll->refcount == 0) { |
3214 | DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n", |
3214 | DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n", |
3215 | intel_crtc->base.base.id, pll->pll_reg); |
3215 | intel_crtc->base.base.id, pll->pll_reg); |
3216 | goto found; |
3216 | goto found; |
3217 | } |
3217 | } |
3218 | } |
3218 | } |
3219 | 3219 | ||
3220 | return NULL; |
3220 | return NULL; |
3221 | 3221 | ||
3222 | found: |
3222 | found: |
3223 | intel_crtc->pch_pll = pll; |
3223 | intel_crtc->pch_pll = pll; |
3224 | pll->refcount++; |
3224 | pll->refcount++; |
3225 | DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe); |
3225 | DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe); |
3226 | prepare: /* separate function? */ |
3226 | prepare: /* separate function? */ |
3227 | DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg); |
3227 | DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg); |
3228 | 3228 | ||
3229 | /* Wait for the clocks to stabilize before rewriting the regs */ |
3229 | /* Wait for the clocks to stabilize before rewriting the regs */ |
3230 | I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); |
3230 | I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); |
3231 | POSTING_READ(pll->pll_reg); |
3231 | POSTING_READ(pll->pll_reg); |
3232 | udelay(150); |
3232 | udelay(150); |
3233 | 3233 | ||
3234 | I915_WRITE(pll->fp0_reg, fp); |
3234 | I915_WRITE(pll->fp0_reg, fp); |
3235 | I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); |
3235 | I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); |
3236 | pll->on = false; |
3236 | pll->on = false; |
3237 | return pll; |
3237 | return pll; |
3238 | } |
3238 | } |
3239 | 3239 | ||
/* Post-modeset sanity check for CPT: verify the pipe's scanline
 * counter (PIPEDSL) is advancing.  If it still reads the same value
 * after two generous waits, the pipe is stuck and the mode set failed.
 */
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* retry once before declaring the pipe stuck */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
	}
}
3253 | 3253 | ||
3254 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
3254 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
3255 | { |
3255 | { |
3256 | struct drm_device *dev = crtc->dev; |
3256 | struct drm_device *dev = crtc->dev; |
3257 | struct drm_i915_private *dev_priv = dev->dev_private; |
3257 | struct drm_i915_private *dev_priv = dev->dev_private; |
3258 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3258 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3259 | struct intel_encoder *encoder; |
3259 | struct intel_encoder *encoder; |
3260 | int pipe = intel_crtc->pipe; |
3260 | int pipe = intel_crtc->pipe; |
3261 | int plane = intel_crtc->plane; |
3261 | int plane = intel_crtc->plane; |
3262 | u32 temp; |
3262 | u32 temp; |
3263 | bool is_pch_port; |
3263 | bool is_pch_port; |
3264 | 3264 | ||
3265 | WARN_ON(!crtc->enabled); |
3265 | WARN_ON(!crtc->enabled); |
3266 | 3266 | ||
3267 | if (intel_crtc->active) |
3267 | if (intel_crtc->active) |
3268 | return; |
3268 | return; |
3269 | 3269 | ||
3270 | intel_crtc->active = true; |
3270 | intel_crtc->active = true; |
3271 | intel_update_watermarks(dev); |
3271 | intel_update_watermarks(dev); |
3272 | 3272 | ||
3273 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
3273 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
3274 | temp = I915_READ(PCH_LVDS); |
3274 | temp = I915_READ(PCH_LVDS); |
3275 | if ((temp & LVDS_PORT_EN) == 0) |
3275 | if ((temp & LVDS_PORT_EN) == 0) |
3276 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); |
3276 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); |
3277 | } |
3277 | } |
3278 | 3278 | ||
3279 | is_pch_port = ironlake_crtc_driving_pch(crtc); |
3279 | is_pch_port = ironlake_crtc_driving_pch(crtc); |
3280 | 3280 | ||
3281 | if (is_pch_port) { |
3281 | if (is_pch_port) { |
3282 | /* Note: FDI PLL enabling _must_ be done before we enable the |
3282 | /* Note: FDI PLL enabling _must_ be done before we enable the |
3283 | * cpu pipes, hence this is separate from all the other fdi/pch |
3283 | * cpu pipes, hence this is separate from all the other fdi/pch |
3284 | * enabling. */ |
3284 | * enabling. */ |
3285 | ironlake_fdi_pll_enable(intel_crtc); |
3285 | ironlake_fdi_pll_enable(intel_crtc); |
3286 | } else { |
3286 | } else { |
3287 | assert_fdi_tx_disabled(dev_priv, pipe); |
3287 | assert_fdi_tx_disabled(dev_priv, pipe); |
3288 | assert_fdi_rx_disabled(dev_priv, pipe); |
3288 | assert_fdi_rx_disabled(dev_priv, pipe); |
3289 | } |
3289 | } |
3290 | 3290 | ||
3291 | for_each_encoder_on_crtc(dev, crtc, encoder) |
3291 | for_each_encoder_on_crtc(dev, crtc, encoder) |
3292 | if (encoder->pre_enable) |
3292 | if (encoder->pre_enable) |
3293 | encoder->pre_enable(encoder); |
3293 | encoder->pre_enable(encoder); |
3294 | 3294 | ||
3295 | /* Enable panel fitting for LVDS */ |
3295 | /* Enable panel fitting for LVDS */ |
3296 | if (dev_priv->pch_pf_size && |
3296 | if (dev_priv->pch_pf_size && |
3297 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || |
3297 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || |
3298 | intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { |
3298 | intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { |
3299 | /* Force use of hard-coded filter coefficients |
3299 | /* Force use of hard-coded filter coefficients |
3300 | * as some pre-programmed values are broken, |
3300 | * as some pre-programmed values are broken, |
3301 | * e.g. x201. |
3301 | * e.g. x201. |
3302 | */ |
3302 | */ |
3303 | if (IS_IVYBRIDGE(dev)) |
3303 | if (IS_IVYBRIDGE(dev)) |
3304 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | |
3304 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | |
3305 | PF_PIPE_SEL_IVB(pipe)); |
3305 | PF_PIPE_SEL_IVB(pipe)); |
3306 | else |
3306 | else |
3307 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); |
3307 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); |
3308 | I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); |
3308 | I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); |
3309 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); |
3309 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); |
3310 | } |
3310 | } |
3311 | 3311 | ||
3312 | /* |
3312 | /* |
3313 | * On ILK+ LUT must be loaded before the pipe is running but with |
3313 | * On ILK+ LUT must be loaded before the pipe is running but with |
3314 | * clocks enabled |
3314 | * clocks enabled |
3315 | */ |
3315 | */ |
3316 | intel_crtc_load_lut(crtc); |
3316 | intel_crtc_load_lut(crtc); |
3317 | 3317 | ||
3318 | intel_enable_pipe(dev_priv, pipe, is_pch_port); |
3318 | intel_enable_pipe(dev_priv, pipe, is_pch_port); |
3319 | intel_enable_plane(dev_priv, plane, pipe); |
3319 | intel_enable_plane(dev_priv, plane, pipe); |
3320 | 3320 | ||
3321 | if (is_pch_port) |
3321 | if (is_pch_port) |
3322 | ironlake_pch_enable(crtc); |
3322 | ironlake_pch_enable(crtc); |
3323 | 3323 | ||
3324 | mutex_lock(&dev->struct_mutex); |
3324 | mutex_lock(&dev->struct_mutex); |
3325 | intel_update_fbc(dev); |
3325 | intel_update_fbc(dev); |
3326 | mutex_unlock(&dev->struct_mutex); |
3326 | mutex_unlock(&dev->struct_mutex); |
3327 | 3327 | ||
3328 | // intel_crtc_update_cursor(crtc, true); |
3328 | // intel_crtc_update_cursor(crtc, true); |
3329 | 3329 | ||
3330 | for_each_encoder_on_crtc(dev, crtc, encoder) |
3330 | for_each_encoder_on_crtc(dev, crtc, encoder) |
3331 | encoder->enable(encoder); |
3331 | encoder->enable(encoder); |
3332 | 3332 | ||
3333 | if (HAS_PCH_CPT(dev)) |
3333 | if (HAS_PCH_CPT(dev)) |
3334 | intel_cpt_verify_modeset(dev, intel_crtc->pipe); |
3334 | intel_cpt_verify_modeset(dev, intel_crtc->pipe); |
3335 | 3335 | ||
3336 | /* |
3336 | /* |
3337 | * There seems to be a race in PCH platform hw (at least on some |
3337 | * There seems to be a race in PCH platform hw (at least on some |
3338 | * outputs) where an enabled pipe still completes any pageflip right |
3338 | * outputs) where an enabled pipe still completes any pageflip right |
3339 | * away (as if the pipe is off) instead of waiting for vblank. As soon |
3339 | * away (as if the pipe is off) instead of waiting for vblank. As soon |
3340 | * as the first vblank happend, everything works as expected. Hence just |
3340 | * as the first vblank happend, everything works as expected. Hence just |
3341 | * wait for one vblank before returning to avoid strange things |
3341 | * wait for one vblank before returning to avoid strange things |
3342 | * happening. |
3342 | * happening. |
3343 | */ |
3343 | */ |
3344 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
3344 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
3345 | } |
3345 | } |
3346 | 3346 | ||
/*
 * Full modeset enable sequence for a Haswell crtc.
 *
 * Ordering mirrors the hardware requirements: FDI training (only when the
 * output is routed through the PCH), encoder pre-enable hooks, DDI pipe
 * clock, panel fitter, gamma LUT, DDI transcoder setup, then pipe/plane,
 * the LPT PCH transcoder, FBC, the encoder enable hooks and finally one
 * vblank wait to paper over a PCH pageflip race (see comment below).
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	bool is_pch_port;

	WARN_ON(!crtc->enabled);

	/* Already running: nothing to do. */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	/* True when this crtc drives an output behind the PCH and hence
	 * needs the FDI link and LPT transcoder brought up. */
	is_pch_port = haswell_crtc_driving_pch(crtc);

	if (is_pch_port)
		dev_priv->display.fdi_link_train(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	intel_ddi_enable_pipe_clock(intel_crtc);

	/* Enable panel fitting for eDP */
	if (dev_priv->pch_pf_size &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
			   PF_PIPE_SEL_IVB(pipe));
		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
	}

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	intel_ddi_enable_pipe_func(crtc);

	intel_enable_pipe(dev_priv, pipe, is_pch_port);
	intel_enable_plane(dev_priv, plane, pipe);

	if (is_pch_port)
		lpt_pch_enable(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Cursor support is disabled in this port. */
//    intel_crtc_update_cursor(crtc, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	/*
	 * There seems to be a race in PCH platform hw (at least on some
	 * outputs) where an enabled pipe still completes any pageflip right
	 * away (as if the pipe is off) instead of waiting for vblank. As soon
	 * as the first vblank happend, everything works as expected. Hence just
	 * wait for one vblank before returning to avoid strange things
	 * happening.
	 */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
}
3423 | 3423 | ||
/*
 * Full modeset disable sequence for an Ironlake/PCH crtc: tears down the
 * plane, FBC, pipe, panel fitter, FDI link, PCH transcoder, transcoder DP
 * control / DPLL_SEL routing (CPT only), the shared PCH DPLL and finally
 * the FDI PLL — roughly the reverse of the enable sequence.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp;


	/* Already off: nothing to do. */
	if (!intel_crtc->active)
		return;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	/* Pageflip/vblank/cursor teardown is disabled in this port. */
//    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
//    intel_crtc_update_cursor(crtc, false);

	intel_disable_plane(dev_priv, plane, pipe);

	/* Stop FBC if it was compressing from the plane being disabled. */
	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_pipe(dev_priv, pipe);

	/* Disable PF */
	I915_WRITE(PF_CTL(pipe), 0);
	I915_WRITE(PF_WIN_SZ(pipe), 0);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	ironlake_fdi_disable(crtc);

	ironlake_disable_pch_transcoder(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* disable TRANS_DP_CTL */
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
		temp |= TRANS_DP_PORT_SEL_NONE;
		I915_WRITE(reg, temp);

		/* disable DPLL_SEL */
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
			break;
		case 1:
			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
			break;
		case 2:
			/* C shares PLL A or B */
			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
			break;
		default:
			BUG(); /* wtf */
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* disable PCH DPLL */
	intel_disable_pch_pll(intel_crtc);

	ironlake_fdi_pll_disable(intel_crtc);

	intel_crtc->active = false;
	intel_update_watermarks(dev);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}
3503 | 3503 | ||
/*
 * Full modeset disable sequence for a Haswell crtc: encoders, plane, FBC,
 * pipe, DDI transcoder function, panel fitter, DDI pipe clock, encoder
 * post-disable hooks, then (for PCH outputs) the LPT transcoder and FDI.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	/* May be TRANSCODER_EDP rather than the pipe's own transcoder. */
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
	bool is_pch_port;

	/* Already off: nothing to do. */
	if (!intel_crtc->active)
		return;

	is_pch_port = haswell_crtc_driving_pch(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);


	intel_disable_plane(dev_priv, plane, pipe);

	/* Stop FBC if it was compressing from the plane being disabled. */
	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_pipe(dev_priv, pipe);

	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	/* Disable PF */
	I915_WRITE(PF_CTL(pipe), 0);
	I915_WRITE(PF_WIN_SZ(pipe), 0);

	intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* PCH-side teardown only applies when the output went through FDI. */
	if (is_pch_port) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_ddi_fdi_disable(crtc);
	}

	intel_crtc->active = false;
	intel_update_watermarks(dev);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}
3555 | 3555 | ||
/* Post-disable .off() hook for Ironlake: drop this crtc's reference on
 * the shared PCH PLL so another crtc may claim it. */
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	intel_put_pch_pll(to_intel_crtc(crtc));
}
3561 | 3561 | ||
3562 | static void haswell_crtc_off(struct drm_crtc *crtc) |
3562 | static void haswell_crtc_off(struct drm_crtc *crtc) |
3563 | { |
3563 | { |
3564 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3564 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3565 | 3565 | ||
3566 | /* Stop saying we're using TRANSCODER_EDP because some other CRTC might |
3566 | /* Stop saying we're using TRANSCODER_EDP because some other CRTC might |
3567 | * start using it. */ |
3567 | * start using it. */ |
3568 | intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe; |
3568 | intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe; |
3569 | 3569 | ||
3570 | intel_ddi_put_crtc_pll(crtc); |
3570 | intel_ddi_put_crtc_pll(crtc); |
3571 | } |
3571 | } |
3572 | 3572 | ||
3573 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) |
3573 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) |
3574 | { |
3574 | { |
3575 | if (!enable && intel_crtc->overlay) { |
3575 | if (!enable && intel_crtc->overlay) { |
3576 | struct drm_device *dev = intel_crtc->base.dev; |
3576 | struct drm_device *dev = intel_crtc->base.dev; |
3577 | struct drm_i915_private *dev_priv = dev->dev_private; |
3577 | struct drm_i915_private *dev_priv = dev->dev_private; |
3578 | 3578 | ||
3579 | mutex_lock(&dev->struct_mutex); |
3579 | mutex_lock(&dev->struct_mutex); |
3580 | dev_priv->mm.interruptible = false; |
3580 | dev_priv->mm.interruptible = false; |
3581 | // (void) intel_overlay_switch_off(intel_crtc->overlay); |
3581 | // (void) intel_overlay_switch_off(intel_crtc->overlay); |
3582 | dev_priv->mm.interruptible = true; |
3582 | dev_priv->mm.interruptible = true; |
3583 | mutex_unlock(&dev->struct_mutex); |
3583 | mutex_unlock(&dev->struct_mutex); |
3584 | } |
3584 | } |
3585 | 3585 | ||
3586 | /* Let userspace switch the overlay on again. In most cases userspace |
3586 | /* Let userspace switch the overlay on again. In most cases userspace |
3587 | * has to recompute where to put it anyway. |
3587 | * has to recompute where to put it anyway. |
3588 | */ |
3588 | */ |
3589 | } |
3589 | } |
3590 | 3590 | ||
/**
 * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
 * cursor plane briefly if not already running after enabling the display
 * plane.
 * This workaround avoids occasional blank screens when self refresh is
 * enabled.
 */
static void
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 cntl = I915_READ(CURCNTR(pipe));

	/* Only needed when the cursor is currently off. */
	if ((cntl & CURSOR_MODE) == 0) {
		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);

		/* Temporarily disable self refresh, pulse the cursor on for
		 * one vblank, then restore both registers. */
		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
		intel_wait_for_vblank(dev_priv->dev, pipe);
		I915_WRITE(CURCNTR(pipe), cntl);
		/* Rewrite CURBASE to latch the restored cursor control. */
		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
	}
}
3614 | 3614 | ||
/*
 * Full modeset enable sequence for a gen2-gen4 crtc: PLL, encoder
 * pre-enable hooks, pipe, plane (plus the G4X cursor workaround), gamma
 * LUT, FBC, overlay and finally the encoder enable hooks.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	WARN_ON(!crtc->enabled);

	/* Already running: nothing to do. */
	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	intel_enable_pll(dev_priv, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	/* Third argument (pch_port) is always false here: no PCH on
	 * these platforms. */
	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);
	if (IS_G4X(dev))
		g4x_fixup_plane(dev_priv, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);
	/* Cursor support is disabled in this port. */
//    intel_crtc_update_cursor(crtc, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
3653 | 3653 | ||
/*
 * Full modeset disable sequence for a gen2-gen4 crtc: encoders, overlay,
 * FBC, plane, pipe, panel fitter (if assigned to this pipe) and finally
 * the PLL.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 pctl;


	/* Already off: nothing to do. */
	if (!intel_crtc->active)
		return;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	/* Give the overlay scaler a chance to disable if it's on this pipe */
//    intel_crtc_wait_for_pending_flips(crtc);
//    drm_vblank_off(dev, pipe);
	intel_crtc_dpms_overlay(intel_crtc, false);
//    intel_crtc_update_cursor(crtc, false);

	/* Stop FBC if it was compressing from the plane being disabled. */
	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);

	/* Disable panel fitter if it is on this pipe. */
	pctl = I915_READ(PFIT_CONTROL);
	if ((pctl & PFIT_ENABLE) &&
	    ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
		I915_WRITE(PFIT_CONTROL, 0);

	intel_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	intel_update_fbc(dev);
	intel_update_watermarks(dev);
}
3695 | 3695 | ||
/* Post-disable .off() hook for gen2-gen4: unlike the ILK/HSW variants
 * there is no shared PLL or transcoder to release, so this is a no-op. */
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
3699 | 3699 | ||
/*
 * Propagate the crtc's enabled mode dimensions into the legacy SAREA
 * (pipeA_w/h or pipeB_w/h) for old DRI userspace.  The entire body is
 * compiled out (#if 0) in this port, so the function is currently a
 * no-op.
 */
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
				    bool enabled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;


#if 0
	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
#endif

}
3733 | 3733 | ||
3734 | /** |
3734 | /** |
3735 | * Sets the power management mode of the pipe and plane. |
3735 | * Sets the power management mode of the pipe and plane. |
3736 | */ |
3736 | */ |
3737 | void intel_crtc_update_dpms(struct drm_crtc *crtc) |
3737 | void intel_crtc_update_dpms(struct drm_crtc *crtc) |
3738 | { |
3738 | { |
3739 | struct drm_device *dev = crtc->dev; |
3739 | struct drm_device *dev = crtc->dev; |
3740 | struct drm_i915_private *dev_priv = dev->dev_private; |
3740 | struct drm_i915_private *dev_priv = dev->dev_private; |
3741 | struct intel_encoder *intel_encoder; |
3741 | struct intel_encoder *intel_encoder; |
3742 | bool enable = false; |
3742 | bool enable = false; |
3743 | 3743 | ||
3744 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) |
3744 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) |
3745 | enable |= intel_encoder->connectors_active; |
3745 | enable |= intel_encoder->connectors_active; |
3746 | 3746 | ||
3747 | if (enable) |
3747 | if (enable) |
3748 | dev_priv->display.crtc_enable(crtc); |
3748 | dev_priv->display.crtc_enable(crtc); |
3749 | else |
3749 | else |
3750 | dev_priv->display.crtc_disable(crtc); |
3750 | dev_priv->display.crtc_disable(crtc); |
3751 | 3751 | ||
3752 | intel_crtc_update_sarea(crtc, enable); |
3752 | intel_crtc_update_sarea(crtc, enable); |
3753 | } |
3753 | } |
3754 | 3754 | ||
/*
 * Fully shut down a crtc: disable the pipe/plane hardware, update the
 * SAREA, power the display off, and mark every connector/encoder that
 * was routed to this crtc as off in the software state tracking.
 */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* crtc should still be enabled when we disable it. */
	WARN_ON(!crtc->enabled);

	intel_crtc->eld_vld = false;
	dev_priv->display.crtc_disable(crtc);
	intel_crtc_update_sarea(crtc, false);
	dev_priv->display.off(crtc);

	/* Sanity check: the plane and pipe really must be off now. */
	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

	/* NOTE(review): framebuffer unpinning is disabled in this port —
	 * presumably handled elsewhere; confirm before re-enabling. */
//    if (crtc->fb) {
//        mutex_lock(&dev->struct_mutex);
//        intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
//        mutex_unlock(&dev->struct_mutex);
//        crtc->fb = NULL;
//    }

	/* Update computed state. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		if (connector->encoder->crtc != crtc)
			continue;

		/* This connector was driven by the crtc we just killed. */
		connector->dpms = DRM_MODE_DPMS_OFF;
		to_intel_encoder(connector->encoder)->connectors_active = false;
	}
}
3792 | 3792 | ||
3793 | void intel_modeset_disable(struct drm_device *dev) |
3793 | void intel_modeset_disable(struct drm_device *dev) |
3794 | { |
3794 | { |
3795 | struct drm_crtc *crtc; |
3795 | struct drm_crtc *crtc; |
3796 | 3796 | ||
3797 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
3797 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
3798 | if (crtc->enabled) |
3798 | if (crtc->enabled) |
3799 | intel_crtc_disable(crtc); |
3799 | intel_crtc_disable(crtc); |
3800 | } |
3800 | } |
3801 | } |
3801 | } |
3802 | 3802 | ||
/* Detach the encoder from the DRM core, then free our wrapper object. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
3810 | 3810 | ||
3811 | /* Simple dpms helper for encodres with just one connector, no cloning and only |
3811 | /* Simple dpms helper for encodres with just one connector, no cloning and only |
3812 | * one kind of off state. It clamps all !ON modes to fully OFF and changes the |
3812 | * one kind of off state. It clamps all !ON modes to fully OFF and changes the |
3813 | * state of the entire output pipe. */ |
3813 | * state of the entire output pipe. */ |
3814 | void intel_encoder_dpms(struct intel_encoder *encoder, int mode) |
3814 | void intel_encoder_dpms(struct intel_encoder *encoder, int mode) |
3815 | { |
3815 | { |
3816 | if (mode == DRM_MODE_DPMS_ON) { |
3816 | if (mode == DRM_MODE_DPMS_ON) { |
3817 | encoder->connectors_active = true; |
3817 | encoder->connectors_active = true; |
3818 | 3818 | ||
3819 | intel_crtc_update_dpms(encoder->base.crtc); |
3819 | intel_crtc_update_dpms(encoder->base.crtc); |
3820 | } else { |
3820 | } else { |
3821 | encoder->connectors_active = false; |
3821 | encoder->connectors_active = false; |
3822 | 3822 | ||
3823 | intel_crtc_update_dpms(encoder->base.crtc); |
3823 | intel_crtc_update_dpms(encoder->base.crtc); |
3824 | } |
3824 | } |
3825 | } |
3825 | } |
3826 | 3826 | ||
/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	/* Only connectors whose hardware reports "on" are checked; an off
	 * connector has nothing that needs to be consistent. */
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
			      drm_get_connector_name(&connector->base));

		/* Software state must agree that this connector is active
		 * and properly linked to its encoder. */
		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
		     "wrong connector dpms state\n");
		WARN(connector->base.encoder != &encoder->base,
		     "active connector not linked to encoder\n");
		WARN(!encoder->connectors_active,
		     "encoder->connectors_active not set\n");

		/* The encoder hardware must be enabled on some pipe. */
		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
		WARN(!encoder_enabled, "encoder not enabled\n");
		if (WARN_ON(!encoder->base.crtc))
			return;

		crtc = encoder->base.crtc;

		/* And the crtc feeding the encoder must be up, active, and
		 * on the same pipe the encoder hardware reports. */
		WARN(!crtc->enabled, "crtc not enabled\n");
		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
		WARN(pipe != to_intel_crtc(crtc)->pipe,
		     "encoder active on the wrong pipe\n");
	}
}
3861 | 3861 | ||
3862 | /* Even simpler default implementation, if there's really no special case to |
3862 | /* Even simpler default implementation, if there's really no special case to |
3863 | * consider. */ |
3863 | * consider. */ |
3864 | void intel_connector_dpms(struct drm_connector *connector, int mode) |
3864 | void intel_connector_dpms(struct drm_connector *connector, int mode) |
3865 | { |
3865 | { |
3866 | struct intel_encoder *encoder = intel_attached_encoder(connector); |
3866 | struct intel_encoder *encoder = intel_attached_encoder(connector); |
3867 | 3867 | ||
3868 | /* All the simple cases only support two dpms states. */ |
3868 | /* All the simple cases only support two dpms states. */ |
3869 | if (mode != DRM_MODE_DPMS_ON) |
3869 | if (mode != DRM_MODE_DPMS_ON) |
3870 | mode = DRM_MODE_DPMS_OFF; |
3870 | mode = DRM_MODE_DPMS_OFF; |
3871 | 3871 | ||
3872 | if (mode == connector->dpms) |
3872 | if (mode == connector->dpms) |
3873 | return; |
3873 | return; |
3874 | 3874 | ||
3875 | connector->dpms = mode; |
3875 | connector->dpms = mode; |
3876 | 3876 | ||
3877 | /* Only need to change hw state when actually enabled */ |
3877 | /* Only need to change hw state when actually enabled */ |
3878 | if (encoder->base.crtc) |
3878 | if (encoder->base.crtc) |
3879 | intel_encoder_dpms(encoder, mode); |
3879 | intel_encoder_dpms(encoder, mode); |
3880 | else |
3880 | else |
3881 | WARN_ON(encoder->connectors_active != false); |
3881 | WARN_ON(encoder->connectors_active != false); |
3882 | 3882 | ||
3883 | intel_modeset_check_state(connector->dev); |
3883 | intel_modeset_check_state(connector->dev); |
3884 | } |
3884 | } |
3885 | 3885 | ||
3886 | /* Simple connector->get_hw_state implementation for encoders that support only |
3886 | /* Simple connector->get_hw_state implementation for encoders that support only |
3887 | * one connector and no cloning and hence the encoder state determines the state |
3887 | * one connector and no cloning and hence the encoder state determines the state |
3888 | * of the connector. */ |
3888 | * of the connector. */ |
3889 | bool intel_connector_get_hw_state(struct intel_connector *connector) |
3889 | bool intel_connector_get_hw_state(struct intel_connector *connector) |
3890 | { |
3890 | { |
3891 | enum pipe pipe = 0; |
3891 | enum pipe pipe = 0; |
3892 | struct intel_encoder *encoder = connector->encoder; |
3892 | struct intel_encoder *encoder = connector->encoder; |
3893 | 3893 | ||
3894 | return encoder->get_hw_state(encoder, &pipe); |
3894 | return encoder->get_hw_state(encoder, &pipe); |
3895 | } |
3895 | } |
3896 | 3896 | ||
/*
 * Validate and massage the requested mode before a modeset. Returns
 * false to reject the mode outright, true if (possibly adjusted) it
 * can be programmed.
 */
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
				  const struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;

	if (HAS_PCH_SPLIT(dev)) {
		/* FDI link clock is fixed at 2.7G */
		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
			return false;
	}

	/* All interlaced capable intel hw wants timings in frames. Note though
	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
	 * timings, so we need to be careful not to clobber these.*/
	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
		drm_mode_set_crtcinfo(adjusted_mode, 0);

	/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
	 * with a hsync front porch of 0.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
	    adjusted_mode->hsync_start == adjusted_mode->hdisplay)
		return false;

	return true;
}
3924 | 3924 | ||
/* Core display clock for Valleyview (presumably kHz, matching the other
 * *_get_display_clock_speed helpers); hard-coded pending real detection. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return 400000; /* FIXME */
}
3929 | 3929 | ||
/* Fixed core display clock for i945 (presumably kHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
3934 | 3934 | ||
/* Fixed core display clock for i915 (presumably kHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
3939 | 3939 | ||
/* Fallback core display clock for the remaining i9xx variants
 * (presumably kHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
3944 | 3944 | ||
3945 | static int i915gm_get_display_clock_speed(struct drm_device *dev) |
3945 | static int i915gm_get_display_clock_speed(struct drm_device *dev) |
3946 | { |
3946 | { |
3947 | u16 gcfgc = 0; |
3947 | u16 gcfgc = 0; |
3948 | 3948 | ||
3949 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); |
3949 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); |
3950 | 3950 | ||
3951 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) |
3951 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) |
3952 | return 133000; |
3952 | return 133000; |
3953 | else { |
3953 | else { |
3954 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { |
3954 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { |
3955 | case GC_DISPLAY_CLOCK_333_MHZ: |
3955 | case GC_DISPLAY_CLOCK_333_MHZ: |
3956 | return 333000; |
3956 | return 333000; |
3957 | default: |
3957 | default: |
3958 | case GC_DISPLAY_CLOCK_190_200_MHZ: |
3958 | case GC_DISPLAY_CLOCK_190_200_MHZ: |
3959 | return 190000; |
3959 | return 190000; |
3960 | } |
3960 | } |
3961 | } |
3961 | } |
3962 | } |
3962 | } |
3963 | 3963 | ||
/* Fixed core display clock for i865 (presumably kHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
3968 | 3968 | ||
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	/* NOTE(review): hpllcc is never read from the hardware in this
	 * version, so the switch always sees 0 and we always report the
	 * assumed high-speed default — confirm whether a HPLLCC config
	 * read was dropped intentionally. */
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
3988 | 3988 | ||
/* Fixed core display clock for i830 (presumably kHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
3993 | 3993 | ||
3994 | static void |
3994 | static void |
3995 | intel_reduce_ratio(uint32_t *num, uint32_t *den) |
3995 | intel_reduce_ratio(uint32_t *num, uint32_t *den) |
3996 | { |
3996 | { |
3997 | while (*num > 0xffffff || *den > 0xffffff) { |
3997 | while (*num > 0xffffff || *den > 0xffffff) { |
3998 | *num >>= 1; |
3998 | *num >>= 1; |
3999 | *den >>= 1; |
3999 | *den >>= 1; |
4000 | } |
4000 | } |
4001 | } |
4001 | } |
4002 | 4002 | ||
/*
 * Compute the GMCH and link M/N values for a display link: each pair is
 * a ratio derived from the pixel clock and the link clock, reduced with
 * intel_reduce_ratio() so both terms fit the 24-bit hardware fields.
 */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;	/* transfer unit size */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
4016 | 4016 | ||
4017 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
4017 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
4018 | { |
4018 | { |
4019 | if (i915_panel_use_ssc >= 0) |
4019 | if (i915_panel_use_ssc >= 0) |
4020 | return i915_panel_use_ssc != 0; |
4020 | return i915_panel_use_ssc != 0; |
4021 | return dev_priv->lvds_use_ssc |
4021 | return dev_priv->lvds_use_ssc |
4022 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); |
4022 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); |
4023 | } |
4023 | } |
4024 | 4024 | ||
/**
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 * @crtc: CRTC structure
 * @fb: framebuffer whose depth drives the desired pipe depth
 * @pipe_bpp: out-parameter, receives the chosen pipe depth in bits per pixel
 * @mode: requested mode
 *
 * A pipe may be connected to one or more outputs.  Based on the depth of the
 * attached framebuffer, choose a good color depth to use on the pipe.
 *
 * If possible, match the pipe depth to the fb depth.  In some cases, this
 * isn't ideal, because the connected output supports a lesser or restricted
 * set of depths.  Resolve that here:
 *    LVDS typically supports only 6bpc, so clamp down in that case
 *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 *    Displays may support a restricted set as well, check EDID and clamp as
 *      appropriate.
 *    DP may want to dither down to 6bpc to fit larger modes
 *
 * RETURNS:
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 * true if they don't match).
 */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 unsigned int *pipe_bpp,
					 struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	struct intel_encoder *intel_encoder;
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* LVDS depth is read from the A3 power bits of the
			 * PCH_LVDS register: powered-up A3 means 8bpc,
			 * otherwise only 6bpc. */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != &intel_encoder->base)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc && edp_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down. (Note: >12bpc will be caught below.)
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
		display_bpc = 6;
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	switch (fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = 8;
		break;
	case 30:
		bpc = 10;
		break;
	case 48:
		bpc = 12;
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	/* Never drive the pipe deeper than the weakest sink can take. */
	display_bpc = min(display_bpc, bpc);

	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
		      bpc, display_bpc);

	*pipe_bpp = display_bpc * 3;

	return display_bpc != bpc;
}
4160 | 4160 | ||
/*
 * Reference clock for Valleyview. Currently hard-wired to 100 MHz — the
 * only reference validated so far; the per-output selection below is
 * kept for later but is intentionally unreachable.
 */
static int vlv_get_refclk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk = 27000; /* for DP & HDMI */

	return 100000; /* only one validated so far */

	/* Dead code below — reached only once the early return is removed. */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		refclk = 96000;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv))
			refclk = 100000;
		else
			refclk = 96000;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
		refclk = 100000;
	}

	return refclk;
}
4182 | 4182 | ||
4183 | static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) |
4183 | static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) |
4184 | { |
4184 | { |
4185 | struct drm_device *dev = crtc->dev; |
4185 | struct drm_device *dev = crtc->dev; |
4186 | struct drm_i915_private *dev_priv = dev->dev_private; |
4186 | struct drm_i915_private *dev_priv = dev->dev_private; |
4187 | int refclk; |
4187 | int refclk; |
4188 | 4188 | ||
4189 | if (IS_VALLEYVIEW(dev)) { |
4189 | if (IS_VALLEYVIEW(dev)) { |
4190 | refclk = vlv_get_refclk(crtc); |
4190 | refclk = vlv_get_refclk(crtc); |
4191 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && |
4191 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && |
4192 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
4192 | intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
4193 | refclk = dev_priv->lvds_ssc_freq * 1000; |
4193 | refclk = dev_priv->lvds_ssc_freq * 1000; |
4194 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
4194 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
4195 | refclk / 1000); |
4195 | refclk / 1000); |
4196 | } else if (!IS_GEN2(dev)) { |
4196 | } else if (!IS_GEN2(dev)) { |
4197 | refclk = 96000; |
4197 | refclk = 96000; |
4198 | } else { |
4198 | } else { |
4199 | refclk = 48000; |
4199 | refclk = 48000; |
4200 | } |
4200 | } |
4201 | 4201 | ||
4202 | return refclk; |
4202 | return refclk; |
4203 | } |
4203 | } |
4204 | 4204 | ||
4205 | static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode, |
4205 | static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode, |
4206 | intel_clock_t *clock) |
4206 | intel_clock_t *clock) |
4207 | { |
4207 | { |
4208 | /* SDVO TV has fixed PLL values depend on its clock range, |
4208 | /* SDVO TV has fixed PLL values depend on its clock range, |
4209 | this mirrors vbios setting. */ |
4209 | this mirrors vbios setting. */ |
4210 | if (adjusted_mode->clock >= 100000 |
4210 | if (adjusted_mode->clock >= 100000 |
4211 | && adjusted_mode->clock < 140500) { |
4211 | && adjusted_mode->clock < 140500) { |
4212 | clock->p1 = 2; |
4212 | clock->p1 = 2; |
4213 | clock->p2 = 10; |
4213 | clock->p2 = 10; |
4214 | clock->n = 3; |
4214 | clock->n = 3; |
4215 | clock->m1 = 16; |
4215 | clock->m1 = 16; |
4216 | clock->m2 = 8; |
4216 | clock->m2 = 8; |
4217 | } else if (adjusted_mode->clock >= 140500 |
4217 | } else if (adjusted_mode->clock >= 140500 |
4218 | && adjusted_mode->clock <= 200000) { |
4218 | && adjusted_mode->clock <= 200000) { |
4219 | clock->p1 = 1; |
4219 | clock->p1 = 1; |
4220 | clock->p2 = 10; |
4220 | clock->p2 = 10; |
4221 | clock->n = 6; |
4221 | clock->n = 6; |
4222 | clock->m1 = 12; |
4222 | clock->m1 = 12; |
4223 | clock->m2 = 8; |
4223 | clock->m2 = 8; |
4224 | } |
4224 | } |
4225 | } |
4225 | } |
4226 | 4226 | ||
/*
 * Program the FP0/FP1 feedback-divider registers for this pipe's DPLL.
 *
 * Packs n/m1/m2 from @clock into the FP register layout (Pineview encodes
 * N as a one-hot bit) and writes FP0. When LVDS downclocking is usable
 * (@reduced_clock supplied and the i915_powersave module option is on),
 * FP1 receives the reduced dividers and lowfreq_avail is set so later code
 * can switch to the low-frequency clock; otherwise FP1 simply mirrors FP0.
 * @reduced_clock may be NULL.
 */
static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
				     intel_clock_t *clock,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev)) {
		/* Pineview stores N one-hot: bit (1 << n) in the high word. */
		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = (1 << reduced_clock->n) << 16 |
				reduced_clock->m1 << 8 | reduced_clock->m2;
	} else {
		/* Other gens store N as a plain binary field. */
		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
				reduced_clock->m2;
	}

	I915_WRITE(FP0(pipe), fp);

	intel_crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915_powersave) {
		/* FP1 carries the reduced dividers for LVDS downclocking. */
		I915_WRITE(FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
	} else {
		I915_WRITE(FP1(pipe), fp);
	}
}
4260 | 4260 | ||
/*
 * Program and enable the DPLL for a pipe on Valleyview.
 *
 * On VLV the PLL dividers and lane-control registers live behind the DPIO
 * sideband interface rather than plain MMIO, so the whole sequence runs
 * under dpio_lock. The order of writes below (enable reference buffers,
 * program dividers, set VCO enable, wait for lock, then program M/N and
 * the pixel multiplier) follows the hardware enable sequence — do not
 * reorder. @reduced_clock and @num_connectors are currently unused here.
 */
static void vlv_update_pll(struct drm_crtc *crtc,
			   struct drm_display_mode *mode,
			   struct drm_display_mode *adjusted_mode,
			   intel_clock_t *clock, intel_clock_t *reduced_clock,
			   int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll, mdiv, pdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	bool is_sdvo;
	u32 temp;

	/* All DPIO sideband accesses must be serialized. */
	mutex_lock(&dev_priv->dpio_lock);

	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);

	/* Enable the reference clock / buffers before touching dividers. */
	dpll = DPLL_VGA_MODE_DIS;
	dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
	dpll |= DPLL_REFA_CLK_ENABLE_VLV;
	dpll |= DPLL_INTEGRATED_CLOCK_VLV;

	I915_WRITE(DPLL(pipe), dpll);
	POSTING_READ(DPLL(pipe));

	bestn = clock->n;
	bestm1 = clock->m1;
	bestm2 = clock->m2;
	bestp1 = clock->p1;
	bestp2 = clock->p2;

	/*
	 * In Valleyview PLL and program lane counter registers are exposed
	 * through DPIO interface
	 */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_POST_DIV_SHIFT);
	mdiv |= (1 << DPIO_K_SHIFT);
	mdiv |= DPIO_ENABLE_CALIBRATION;
	intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);

	intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);

	/* Reference-select / bias magic numbers — presumably taken from the
	 * VLV BIOS programming sequence; confirm against the PRM. */
	pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
		(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
		(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
		(5 << DPIO_CLK_BIAS_CTL_SHIFT);
	intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);

	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);

	/* Turn the VCO on and wait (up to 1 ms) for the PLL to lock. */
	dpll |= DPLL_VCO_ENABLE;
	I915_WRITE(DPLL(pipe), dpll);
	POSTING_READ(DPLL(pipe));
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);

	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
		intel_dp_set_m_n(crtc, mode, adjusted_mode);

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* SDVO/HDMI pixel multiplier lives in DPLL_MD; 0 otherwise. */
	temp = 0;
	if (is_sdvo) {
		temp = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (temp > 1)
			temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
		else
			temp = 0;
	}
	I915_WRITE(DPLL_MD(pipe), temp);
	POSTING_READ(DPLL_MD(pipe));

	/* Now program lane control registers */
	if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
	   || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
	{
		temp = 0x1000C4;
		/* Bit 21 selects the second pipe — TODO confirm against PRM. */
		if(pipe == 1)
			temp |= (1 << 21);
		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
	}
	if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP))
	{
		temp = 0x1000C4;
		if(pipe == 1)
			temp |= (1 << 21);
		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}
4364 | 4364 | ||
/*
 * Program and enable the DPLL for a pipe on gen3+ (non-VLV) hardware.
 *
 * Builds the DPLL control value from the output type (LVDS vs DAC/serial),
 * the SDVO/DP high-speed requirement, the P1/P2 post dividers from @clock,
 * and the reference-clock selection (TV clock or LVDS spread-spectrum).
 * The enable sequence — write with VCO disabled, settle, pre_pll_enable
 * encoder hooks, DP M/N, then the final enable write — is order-sensitive;
 * do not reorder. @reduced_clock (may be NULL) only affects FP1/the G4X
 * FPA1 P1 field. On gen < 4 the DPLL is written a second time after the
 * clocks are stable because the pixel multiplier only latches then.
 */
static void i9xx_update_pll(struct drm_crtc *crtc,
			    struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode,
			    intel_clock_t *clock, intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	u32 dpll;
	bool is_sdvo;

	/* FP0/FP1 must be programmed before the DPLL itself. */
	i9xx_update_pll_dividers(crtc, clock, reduced_clock);

	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			/* Only these platforms carry the multiplier in DPLL
			 * itself; gen4+ uses DPLL_MD further down. */
			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
				dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* G4X also encodes the downclocked P1 for FP1. */
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, LVDS SSC, or default. */
	if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* Program everything except VCO enable first, then settle. */
	dpll |= DPLL_VCO_ENABLE;
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* Give encoders a chance to prepare before the PLL goes live. */
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
		intel_dp_set_m_n(crtc, mode, adjusted_mode);

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Gen4+: pixel multiplier goes into DPLL_MD. */
		u32 temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
	}
}
4475 | 4475 | ||
/*
 * Program and enable the DPLL for a pipe on gen2 (i8xx) hardware.
 *
 * Gen2 encodes P1 differently for LVDS (one-hot field) vs other outputs
 * (binary field plus divide-by-two/divide-by-four flags) and has no
 * DPLL_MD register, so the final DPLL value is written a second time
 * after the clocks are stable to latch the pixel multiplier. The write
 * ordering (VCO off -> settle -> pre_pll_enable hooks -> enable ->
 * settle -> rewrite) is the hardware sequence; do not reorder.
 * @reduced_clock (may be NULL) only influences the FP dividers.
 */
static void i8xx_update_pll(struct drm_crtc *crtc,
			    struct drm_display_mode *adjusted_mode,
			    intel_clock_t *clock, intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	u32 dpll;

	/* FP0/FP1 must be programmed before the DPLL itself. */
	i9xx_update_pll_dividers(crtc, clock, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/* LVDS: one-hot P1 encoding. */
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		/* Non-LVDS: binary P1 with special-case dividers. */
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* Program everything except VCO enable first, then settle. */
	dpll |= DPLL_VCO_ENABLE;
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* Give encoders a chance to prepare before the PLL goes live. */
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);
}
4535 | 4535 | ||
/*
 * Program the pipe/transcoder timing registers from the adjusted mode.
 *
 * Writes HTOTAL/HBLANK/HSYNC and VTOTAL/VBLANK/VSYNC for the CRTC's CPU
 * transcoder (all fields are programmed as value - 1, per the register
 * format) plus VSYNCSHIFT on gen4+, and PIPESRC with the user-requested
 * (unscaled) size from @mode.
 *
 * NOTE: for interlaced modes on gen3+ this function mutates
 * @adjusted_mode (crtc_vtotal and crtc_vblank_end are decremented)
 * because the hardware adds the two halflines itself.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
	uint32_t vsyncshift;

	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		/* Odd-field vsync is offset by half a line. */
		vsyncshift = adjusted_mode->crtc_hsync_start
			- adjusted_mode->crtc_htotal / 2;
	} else {
		vsyncshift = 0;
	}

	/* VSYNCSHIFT only exists on gen4 and later. */
	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* All timing registers take (value - 1); high word = end/total. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
}
4593 | 4593 | ||
4594 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
4594 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
4595 | struct drm_display_mode *mode, |
4595 | struct drm_display_mode *mode, |
4596 | struct drm_display_mode *adjusted_mode, |
4596 | struct drm_display_mode *adjusted_mode, |
4597 | int x, int y, |
4597 | int x, int y, |
4598 | struct drm_framebuffer *fb) |
4598 | struct drm_framebuffer *fb) |
4599 | { |
4599 | { |
4600 | struct drm_device *dev = crtc->dev; |
4600 | struct drm_device *dev = crtc->dev; |
4601 | struct drm_i915_private *dev_priv = dev->dev_private; |
4601 | struct drm_i915_private *dev_priv = dev->dev_private; |
4602 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4602 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4603 | int pipe = intel_crtc->pipe; |
4603 | int pipe = intel_crtc->pipe; |
4604 | int plane = intel_crtc->plane; |
4604 | int plane = intel_crtc->plane; |
4605 | int refclk, num_connectors = 0; |
4605 | int refclk, num_connectors = 0; |
4606 | intel_clock_t clock, reduced_clock; |
4606 | intel_clock_t clock, reduced_clock; |
4607 | u32 dspcntr, pipeconf; |
4607 | u32 dspcntr, pipeconf; |
4608 | bool ok, has_reduced_clock = false, is_sdvo = false; |
4608 | bool ok, has_reduced_clock = false, is_sdvo = false; |
4609 | bool is_lvds = false, is_tv = false, is_dp = false; |
4609 | bool is_lvds = false, is_tv = false, is_dp = false; |
4610 | struct intel_encoder *encoder; |
4610 | struct intel_encoder *encoder; |
4611 | const intel_limit_t *limit; |
4611 | const intel_limit_t *limit; |
4612 | int ret; |
4612 | int ret; |
4613 | 4613 | ||
4614 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
4614 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
4615 | switch (encoder->type) { |
4615 | switch (encoder->type) { |
4616 | case INTEL_OUTPUT_LVDS: |
4616 | case INTEL_OUTPUT_LVDS: |
4617 | is_lvds = true; |
4617 | is_lvds = true; |
4618 | break; |
4618 | break; |
4619 | case INTEL_OUTPUT_SDVO: |
4619 | case INTEL_OUTPUT_SDVO: |
4620 | case INTEL_OUTPUT_HDMI: |
4620 | case INTEL_OUTPUT_HDMI: |
4621 | is_sdvo = true; |
4621 | is_sdvo = true; |
4622 | if (encoder->needs_tv_clock) |
4622 | if (encoder->needs_tv_clock) |
4623 | is_tv = true; |
4623 | is_tv = true; |
4624 | break; |
4624 | break; |
4625 | case INTEL_OUTPUT_TVOUT: |
4625 | case INTEL_OUTPUT_TVOUT: |
4626 | is_tv = true; |
4626 | is_tv = true; |
4627 | break; |
4627 | break; |
4628 | case INTEL_OUTPUT_DISPLAYPORT: |
4628 | case INTEL_OUTPUT_DISPLAYPORT: |
4629 | is_dp = true; |
4629 | is_dp = true; |
4630 | break; |
4630 | break; |
4631 | } |
4631 | } |
4632 | 4632 | ||
4633 | num_connectors++; |
4633 | num_connectors++; |
4634 | } |
4634 | } |
4635 | 4635 | ||
4636 | refclk = i9xx_get_refclk(crtc, num_connectors); |
4636 | refclk = i9xx_get_refclk(crtc, num_connectors); |
4637 | 4637 | ||
4638 | /* |
4638 | /* |
4639 | * Returns a set of divisors for the desired target clock with the given |
4639 | * Returns a set of divisors for the desired target clock with the given |
4640 | * refclk, or FALSE. The returned values represent the clock equation: |
4640 | * refclk, or FALSE. The returned values represent the clock equation: |
4641 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
4641 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
4642 | */ |
4642 | */ |
4643 | limit = intel_limit(crtc, refclk); |
4643 | limit = intel_limit(crtc, refclk); |
4644 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, |
4644 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, |
4645 | &clock); |
4645 | &clock); |
4646 | if (!ok) { |
4646 | if (!ok) { |
4647 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
4647 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
4648 | return -EINVAL; |
4648 | return -EINVAL; |
4649 | } |
4649 | } |
4650 | 4650 | ||
4651 | /* Ensure that the cursor is valid for the new mode before changing... */ |
4651 | /* Ensure that the cursor is valid for the new mode before changing... */ |
4652 | // intel_crtc_update_cursor(crtc, true); |
4652 | // intel_crtc_update_cursor(crtc, true); |
4653 | 4653 | ||
4654 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
4654 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
4655 | /* |
4655 | /* |
4656 | * Ensure we match the reduced clock's P to the target clock. |
4656 | * Ensure we match the reduced clock's P to the target clock. |
4657 | * If the clocks don't match, we can't switch the display clock |
4657 | * If the clocks don't match, we can't switch the display clock |
4658 | * by using the FP0/FP1. In such case we will disable the LVDS |
4658 | * by using the FP0/FP1. In such case we will disable the LVDS |
4659 | * downclock feature. |
4659 | * downclock feature. |
4660 | */ |
4660 | */ |
4661 | has_reduced_clock = limit->find_pll(limit, crtc, |
4661 | has_reduced_clock = limit->find_pll(limit, crtc, |
4662 | dev_priv->lvds_downclock, |
4662 | dev_priv->lvds_downclock, |
4663 | refclk, |
4663 | refclk, |
4664 | &clock, |
4664 | &clock, |
4665 | &reduced_clock); |
4665 | &reduced_clock); |
4666 | } |
4666 | } |
4667 | 4667 | ||
4668 | if (is_sdvo && is_tv) |
4668 | if (is_sdvo && is_tv) |
4669 | i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); |
4669 | i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); |
4670 | 4670 | ||
4671 | if (IS_GEN2(dev)) |
4671 | if (IS_GEN2(dev)) |
4672 | i8xx_update_pll(crtc, adjusted_mode, &clock, |
4672 | i8xx_update_pll(crtc, adjusted_mode, &clock, |
4673 | has_reduced_clock ? &reduced_clock : NULL, |
4673 | has_reduced_clock ? &reduced_clock : NULL, |
4674 | num_connectors); |
4674 | num_connectors); |
4675 | else if (IS_VALLEYVIEW(dev)) |
4675 | else if (IS_VALLEYVIEW(dev)) |
4676 | vlv_update_pll(crtc, mode, adjusted_mode, &clock, |
4676 | vlv_update_pll(crtc, mode, adjusted_mode, &clock, |
4677 | has_reduced_clock ? &reduced_clock : NULL, |
4677 | has_reduced_clock ? &reduced_clock : NULL, |
4678 | num_connectors); |
4678 | num_connectors); |
4679 | else |
4679 | else |
4680 | i9xx_update_pll(crtc, mode, adjusted_mode, &clock, |
4680 | i9xx_update_pll(crtc, mode, adjusted_mode, &clock, |
4681 | has_reduced_clock ? &reduced_clock : NULL, |
4681 | has_reduced_clock ? &reduced_clock : NULL, |
4682 | num_connectors); |
4682 | num_connectors); |
4683 | 4683 | ||
4684 | /* setup pipeconf */ |
4684 | /* setup pipeconf */ |
4685 | pipeconf = I915_READ(PIPECONF(pipe)); |
4685 | pipeconf = I915_READ(PIPECONF(pipe)); |
4686 | 4686 | ||
4687 | /* Set up the display plane register */ |
4687 | /* Set up the display plane register */ |
4688 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
4688 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
4689 | 4689 | ||
4690 | if (pipe == 0) |
4690 | if (pipe == 0) |
4691 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; |
4691 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; |
4692 | else |
4692 | else |
4693 | dspcntr |= DISPPLANE_SEL_PIPE_B; |
4693 | dspcntr |= DISPPLANE_SEL_PIPE_B; |
4694 | 4694 | ||
4695 | if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { |
4695 | if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { |
4696 | /* Enable pixel doubling when the dot clock is > 90% of the (display) |
4696 | /* Enable pixel doubling when the dot clock is > 90% of the (display) |
4697 | * core speed. |
4697 | * core speed. |
4698 | * |
4698 | * |
4699 | * XXX: No double-wide on 915GM pipe B. Is that the only reason for the |
4699 | * XXX: No double-wide on 915GM pipe B. Is that the only reason for the |
4700 | * pipe == 0 check? |
4700 | * pipe == 0 check? |
4701 | */ |
4701 | */ |
4702 | if (mode->clock > |
4702 | if (mode->clock > |
4703 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) |
4703 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) |
4704 | pipeconf |= PIPECONF_DOUBLE_WIDE; |
4704 | pipeconf |= PIPECONF_DOUBLE_WIDE; |
4705 | else |
4705 | else |
4706 | pipeconf &= ~PIPECONF_DOUBLE_WIDE; |
4706 | pipeconf &= ~PIPECONF_DOUBLE_WIDE; |
4707 | } |
4707 | } |
4708 | 4708 | ||
4709 | /* default to 8bpc */ |
4709 | /* default to 8bpc */ |
4710 | pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN); |
4710 | pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN); |
4711 | if (is_dp) { |
4711 | if (is_dp) { |
4712 | if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
4712 | if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
4713 | pipeconf |= PIPECONF_6BPC | |
4713 | pipeconf |= PIPECONF_6BPC | |
4714 | PIPECONF_DITHER_EN | |
4714 | PIPECONF_DITHER_EN | |
4715 | PIPECONF_DITHER_TYPE_SP; |
4715 | PIPECONF_DITHER_TYPE_SP; |
4716 | } |
4716 | } |
4717 | } |
4717 | } |
4718 | 4718 | ||
4719 | if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { |
4719 | if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { |
4720 | if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
4720 | if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
4721 | pipeconf |= PIPECONF_6BPC | |
4721 | pipeconf |= PIPECONF_6BPC | |
4722 | PIPECONF_ENABLE | |
4722 | PIPECONF_ENABLE | |
4723 | I965_PIPECONF_ACTIVE; |
4723 | I965_PIPECONF_ACTIVE; |
4724 | } |
4724 | } |
4725 | } |
4725 | } |
4726 | 4726 | ||
4727 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
4727 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
4728 | drm_mode_debug_printmodeline(mode); |
4728 | drm_mode_debug_printmodeline(mode); |
4729 | 4729 | ||
4730 | if (HAS_PIPE_CXSR(dev)) { |
4730 | if (HAS_PIPE_CXSR(dev)) { |
4731 | if (intel_crtc->lowfreq_avail) { |
4731 | if (intel_crtc->lowfreq_avail) { |
4732 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
4732 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
4733 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
4733 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
4734 | } else { |
4734 | } else { |
4735 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
4735 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
4736 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; |
4736 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; |
4737 | } |
4737 | } |
4738 | } |
4738 | } |
4739 | 4739 | ||
4740 | pipeconf &= ~PIPECONF_INTERLACE_MASK; |
4740 | pipeconf &= ~PIPECONF_INTERLACE_MASK; |
4741 | if (!IS_GEN2(dev) && |
4741 | if (!IS_GEN2(dev) && |
4742 | adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) |
4742 | adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) |
4743 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; |
4743 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; |
4744 | else |
4744 | else |
4745 | pipeconf |= PIPECONF_PROGRESSIVE; |
4745 | pipeconf |= PIPECONF_PROGRESSIVE; |
4746 | 4746 | ||
4747 | intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
4747 | intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
4748 | 4748 | ||
4749 | /* pipesrc and dspsize control the size that is scaled from, |
4749 | /* pipesrc and dspsize control the size that is scaled from, |
4750 | * which should always be the user's requested size. |
4750 | * which should always be the user's requested size. |
4751 | */ |
4751 | */ |
4752 | I915_WRITE(DSPSIZE(plane), |
4752 | I915_WRITE(DSPSIZE(plane), |
4753 | ((mode->vdisplay - 1) << 16) | |
4753 | ((mode->vdisplay - 1) << 16) | |
4754 | (mode->hdisplay - 1)); |
4754 | (mode->hdisplay - 1)); |
4755 | I915_WRITE(DSPPOS(plane), 0); |
4755 | I915_WRITE(DSPPOS(plane), 0); |
4756 | 4756 | ||
4757 | I915_WRITE(PIPECONF(pipe), pipeconf); |
4757 | I915_WRITE(PIPECONF(pipe), pipeconf); |
4758 | POSTING_READ(PIPECONF(pipe)); |
4758 | POSTING_READ(PIPECONF(pipe)); |
4759 | intel_enable_pipe(dev_priv, pipe, false); |
4759 | intel_enable_pipe(dev_priv, pipe, false); |
4760 | 4760 | ||
4761 | intel_wait_for_vblank(dev, pipe); |
4761 | intel_wait_for_vblank(dev, pipe); |
4762 | 4762 | ||
4763 | I915_WRITE(DSPCNTR(plane), dspcntr); |
4763 | I915_WRITE(DSPCNTR(plane), dspcntr); |
4764 | POSTING_READ(DSPCNTR(plane)); |
4764 | POSTING_READ(DSPCNTR(plane)); |
4765 | 4765 | ||
4766 | ret = intel_pipe_set_base(crtc, x, y, fb); |
4766 | ret = intel_pipe_set_base(crtc, x, y, fb); |
4767 | 4767 | ||
4768 | intel_update_watermarks(dev); |
4768 | intel_update_watermarks(dev); |
4769 | 4769 | ||
4770 | return ret; |
4770 | return ret; |
4771 | } |
4771 | } |
4772 | 4772 | ||
/*
 * ironlake_init_pch_refclk - program the PCH display reference clock
 * (PCH_DREF_CONTROL) on Ironlake-class platforms (IBX/CPT).
 *
 * Scans every registered encoder to determine whether a panel (LVDS or
 * eDP, CPU- or PCH-attached) is present and whether spread-spectrum
 * clocking (SSC) may be used, then programs the DREF control register
 * in the required order: SSC source first, CPU output second.  Each
 * register write is posted and followed by a 200us delay so the
 * reference clock can settle between steps.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 temp;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_pch_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			/* eDP can hang off either the CPU or the PCH; the
			 * CPU-attached case needs a CPU source output below. */
			if (intel_encoder_is_pch_edp(&encoder->base))
				has_pch_edp = true;
			else
				has_cpu_edp = true;
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		/* On IBX the reference may come from an external CK505
		 * clock chip (per VBT display_clock_mode); SSC is only
		 * usable when that chip is present. */
		has_ck505 = dev_priv->display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
		      has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		temp |= DREF_NONSPREAD_CK505_ENABLE;
	else
		temp |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			temp |= DREF_SSC1_ENABLE;
		} else
			temp &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		temp &= ~ DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}
}
4886 | 4886 | ||
/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	bool has_vga = false;
	bool is_sdv = false;
	u32 tmp;

	/* Only a VGA (analog, i.e. FDI-driven) output needs CLKOUT_DP. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		}
	}

	if (!has_vga)
		return;

	mutex_lock(&dev_priv->dpio_lock);

	/* XXX: Rip out SDV support once Haswell ships for real. */
	if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
		is_sdv = true;

	/* Un-gate the SSC source but route the clock through the
	 * alternate path (PATHALT) while it is being reconfigured. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	/* Switch off the alternate path again now the source is up. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	if (!is_sdv) {
		/* Pulse the FDI mPHY reset: assert via SOUTH_CHICKEN2,
		 * wait (up to 100us) for the status bit to follow, then
		 * de-assert and wait for it to clear. */
		tmp = I915_READ(SOUTH_CHICKEN2);
		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
		I915_WRITE(SOUTH_CHICKEN2, tmp);

		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
			DRM_ERROR("FDI mPHY reset assert timeout\n");

		tmp = I915_READ(SOUTH_CHICKEN2);
		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
		I915_WRITE(SOUTH_CHICKEN2, tmp);

		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
					FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
				       100))
			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
	}

	/* Everything below programs mPHY tuning registers over the
	 * sideband interface.  The offsets and values are magic numbers
	 * from the hardware enablement sequence; NOTE(review): they are
	 * not named in this file — do not alter without the spec. */
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	if (!is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
		tmp &= ~(0x3 << 6);
		tmp |= (1 << 6) | (1 << 0);
		intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
	}

	if (is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
		tmp |= 0x7FFF;
		intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
	}

	/* Per-lane/per-channel settings are written in mirrored pairs
	 * (0x20xx / 0x21xx) throughout the rest of the sequence. */
	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	if (is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
		intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
		intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
		tmp |= (0x3F << 8);
		intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
		tmp |= (0x3F << 8);
		intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
	}

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	if (!is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
		tmp &= ~(7 << 13);
		tmp |= (5 << 13);
		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
		tmp &= ~(7 << 13);
		tmp |= (5 << 13);
		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
	}

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	if (!is_sdv) {
		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
		tmp |= (1 << 27);
		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
		tmp |= (1 << 27);
		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
		tmp &= ~(0xF << 28);
		tmp |= (4 << 28);
		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
		tmp &= ~(0xF << 28);
		tmp |= (4 << 28);
		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
	}

	/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
	tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
	tmp |= SBI_DBUFF0_ENABLE;
	intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->dpio_lock);
}
5055 | 5055 | ||
/*
 * Initialize the PCH display reference clocks at driver load time,
 * dispatching to the implementation for the detected PCH generation.
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		ironlake_init_pch_refclk(dev);
		return;
	}

	if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
5066 | 5066 | ||
5067 | static int ironlake_get_refclk(struct drm_crtc *crtc) |
5067 | static int ironlake_get_refclk(struct drm_crtc *crtc) |
5068 | { |
5068 | { |
5069 | struct drm_device *dev = crtc->dev; |
5069 | struct drm_device *dev = crtc->dev; |
5070 | struct drm_i915_private *dev_priv = dev->dev_private; |
5070 | struct drm_i915_private *dev_priv = dev->dev_private; |
5071 | struct intel_encoder *encoder; |
5071 | struct intel_encoder *encoder; |
5072 | struct intel_encoder *edp_encoder = NULL; |
5072 | struct intel_encoder *edp_encoder = NULL; |
5073 | int num_connectors = 0; |
5073 | int num_connectors = 0; |
5074 | bool is_lvds = false; |
5074 | bool is_lvds = false; |
5075 | 5075 | ||
5076 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
5076 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
5077 | switch (encoder->type) { |
5077 | switch (encoder->type) { |
5078 | case INTEL_OUTPUT_LVDS: |
5078 | case INTEL_OUTPUT_LVDS: |
5079 | is_lvds = true; |
5079 | is_lvds = true; |
5080 | break; |
5080 | break; |
5081 | case INTEL_OUTPUT_EDP: |
5081 | case INTEL_OUTPUT_EDP: |
5082 | edp_encoder = encoder; |
5082 | edp_encoder = encoder; |
5083 | break; |
5083 | break; |
5084 | } |
5084 | } |
5085 | num_connectors++; |
5085 | num_connectors++; |
5086 | } |
5086 | } |
5087 | 5087 | ||
5088 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
5088 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
5089 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
5089 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
5090 | dev_priv->lvds_ssc_freq); |
5090 | dev_priv->lvds_ssc_freq); |
5091 | return dev_priv->lvds_ssc_freq * 1000; |
5091 | return dev_priv->lvds_ssc_freq * 1000; |
5092 | } |
5092 | } |
5093 | 5093 | ||
5094 | return 120000; |
5094 | return 120000; |
5095 | } |
5095 | } |
5096 | 5096 | ||
5097 | static void ironlake_set_pipeconf(struct drm_crtc *crtc, |
5097 | static void ironlake_set_pipeconf(struct drm_crtc *crtc, |
5098 | struct drm_display_mode *adjusted_mode, |
5098 | struct drm_display_mode *adjusted_mode, |
5099 | bool dither) |
5099 | bool dither) |
5100 | { |
5100 | { |
5101 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
5101 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
5102 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5102 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5103 | int pipe = intel_crtc->pipe; |
5103 | int pipe = intel_crtc->pipe; |
5104 | uint32_t val; |
5104 | uint32_t val; |
5105 | 5105 | ||
5106 | val = I915_READ(PIPECONF(pipe)); |
5106 | val = I915_READ(PIPECONF(pipe)); |
5107 | 5107 | ||
5108 | val &= ~PIPECONF_BPC_MASK; |
5108 | val &= ~PIPECONF_BPC_MASK; |
5109 | switch (intel_crtc->bpp) { |
5109 | switch (intel_crtc->bpp) { |
5110 | case 18: |
5110 | case 18: |
5111 | val |= PIPECONF_6BPC; |
5111 | val |= PIPECONF_6BPC; |
5112 | break; |
5112 | break; |
5113 | case 24: |
5113 | case 24: |
5114 | val |= PIPECONF_8BPC; |
5114 | val |= PIPECONF_8BPC; |
5115 | break; |
5115 | break; |
5116 | case 30: |
5116 | case 30: |
5117 | val |= PIPECONF_10BPC; |
5117 | val |= PIPECONF_10BPC; |
5118 | break; |
5118 | break; |
5119 | case 36: |
5119 | case 36: |
5120 | val |= PIPECONF_12BPC; |
5120 | val |= PIPECONF_12BPC; |
5121 | break; |
5121 | break; |
5122 | default: |
5122 | default: |
5123 | /* Case prevented by intel_choose_pipe_bpp_dither. */ |
5123 | /* Case prevented by intel_choose_pipe_bpp_dither. */ |
5124 | BUG(); |
5124 | BUG(); |
5125 | } |
5125 | } |
5126 | 5126 | ||
5127 | val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); |
5127 | val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); |
5128 | if (dither) |
5128 | if (dither) |
5129 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); |
5129 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); |
5130 | 5130 | ||
5131 | val &= ~PIPECONF_INTERLACE_MASK; |
5131 | val &= ~PIPECONF_INTERLACE_MASK; |
5132 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) |
5132 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) |
5133 | val |= PIPECONF_INTERLACED_ILK; |
5133 | val |= PIPECONF_INTERLACED_ILK; |
5134 | else |
5134 | else |
5135 | val |= PIPECONF_PROGRESSIVE; |
5135 | val |= PIPECONF_PROGRESSIVE; |
5136 | 5136 | ||
5137 | if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) |
5137 | if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) |
5138 | val |= PIPECONF_COLOR_RANGE_SELECT; |
5138 | val |= PIPECONF_COLOR_RANGE_SELECT; |
5139 | else |
5139 | else |
5140 | val &= ~PIPECONF_COLOR_RANGE_SELECT; |
5140 | val &= ~PIPECONF_COLOR_RANGE_SELECT; |
5141 | 5141 | ||
5142 | I915_WRITE(PIPECONF(pipe), val); |
5142 | I915_WRITE(PIPECONF(pipe), val); |
5143 | POSTING_READ(PIPECONF(pipe)); |
5143 | POSTING_READ(PIPECONF(pipe)); |
5144 | } |
5144 | } |
5145 | 5145 | ||
/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 *
 * The unit is programmed as an identity transform by default (unity
 * diagonal, zero pre/post offsets); when the mode requests limited
 * color range the diagonal is scaled down and a black-level offset
 * is added after the matrix multiply.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc,
			       const struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
		/*
		 * Scale full range [0..255] down to limited range
		 * [16..235]: (235-16)/255 ~= 0.8588, expressed as a
		 * 12-bit fraction and masked down to the register's
		 * precision.
		 */
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	/* No offsets are applied before the matrix multiply. */
	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		/* gen7+ has dedicated post-offset registers. */
		uint16_t postoff = 0;

		if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
			/*
			 * Lift black level to 16/255 after the multiply.
			 * NOTE(review): scale factor (1 << 13) differs from
			 * the (1 << 12) used for the coefficients — confirm
			 * against the post-offset register format.
			 */
			postoff = (16 * (1 << 13) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		/*
		 * Older gens lack explicit post-offsets; the black level
		 * shift is requested via a mode flag instead.
		 */
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}
5210 | 5210 | ||
5211 | static void haswell_set_pipeconf(struct drm_crtc *crtc, |
5211 | static void haswell_set_pipeconf(struct drm_crtc *crtc, |
5212 | struct drm_display_mode *adjusted_mode, |
5212 | struct drm_display_mode *adjusted_mode, |
5213 | bool dither) |
5213 | bool dither) |
5214 | { |
5214 | { |
5215 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
5215 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
5216 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5216 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5217 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
5217 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
5218 | uint32_t val; |
5218 | uint32_t val; |
5219 | 5219 | ||
5220 | val = I915_READ(PIPECONF(cpu_transcoder)); |
5220 | val = I915_READ(PIPECONF(cpu_transcoder)); |
5221 | 5221 | ||
5222 | val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); |
5222 | val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); |
5223 | if (dither) |
5223 | if (dither) |
5224 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); |
5224 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); |
5225 | 5225 | ||
5226 | val &= ~PIPECONF_INTERLACE_MASK_HSW; |
5226 | val &= ~PIPECONF_INTERLACE_MASK_HSW; |
5227 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) |
5227 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) |
5228 | val |= PIPECONF_INTERLACED_ILK; |
5228 | val |= PIPECONF_INTERLACED_ILK; |
5229 | else |
5229 | else |
5230 | val |= PIPECONF_PROGRESSIVE; |
5230 | val |= PIPECONF_PROGRESSIVE; |
5231 | 5231 | ||
5232 | I915_WRITE(PIPECONF(cpu_transcoder), val); |
5232 | I915_WRITE(PIPECONF(cpu_transcoder), val); |
5233 | POSTING_READ(PIPECONF(cpu_transcoder)); |
5233 | POSTING_READ(PIPECONF(cpu_transcoder)); |
5234 | } |
5234 | } |
5235 | 5235 | ||
/*
 * Find PLL divisors for @adjusted_mode on an Ironlake-class pipe.
 *
 * On success @clock holds the divisors and true is returned; false
 * means no divisor set could satisfy the target clock.  For LVDS
 * panels with downclocking available, additionally searches for a
 * reduced-rate divisor set and reports it via @has_reduced_clock /
 * @reduced_clock.
 *
 * NOTE(review): *has_reduced_clock is only written on the LVDS
 * downclock path — the caller must pre-initialize it to false.
 */
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
				    struct drm_display_mode *adjusted_mode,
				    intel_clock_t *clock,
				    bool *has_reduced_clock,
				    intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	int refclk;
	const intel_limit_t *limit;
	bool ret, is_sdvo = false, is_tv = false, is_lvds = false;

	/* Classify the outputs attached to this crtc. */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (intel_encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		}
	}

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			      clock);
	if (!ret)
		return false;

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		/* Passing @clock as the match argument constrains P. */
		*has_reduced_clock = limit->find_pll(limit, crtc,
						     dev_priv->lvds_downclock,
						     refclk,
						     clock,
						     reduced_clock);
	}

	/* SDVO TV needs its clock nudged to a TV-compatible rate. */
	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);

	return true;
}
5298 | 5298 | ||
5299 | static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) |
5299 | static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) |
5300 | { |
5300 | { |
5301 | struct drm_i915_private *dev_priv = dev->dev_private; |
5301 | struct drm_i915_private *dev_priv = dev->dev_private; |
5302 | uint32_t temp; |
5302 | uint32_t temp; |
5303 | 5303 | ||
5304 | temp = I915_READ(SOUTH_CHICKEN1); |
5304 | temp = I915_READ(SOUTH_CHICKEN1); |
5305 | if (temp & FDI_BC_BIFURCATION_SELECT) |
5305 | if (temp & FDI_BC_BIFURCATION_SELECT) |
5306 | return; |
5306 | return; |
5307 | 5307 | ||
5308 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); |
5308 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); |
5309 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); |
5309 | WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); |
5310 | 5310 | ||
5311 | temp |= FDI_BC_BIFURCATION_SELECT; |
5311 | temp |= FDI_BC_BIFURCATION_SELECT; |
5312 | DRM_DEBUG_KMS("enabling fdi C rx\n"); |
5312 | DRM_DEBUG_KMS("enabling fdi C rx\n"); |
5313 | I915_WRITE(SOUTH_CHICKEN1, temp); |
5313 | I915_WRITE(SOUTH_CHICKEN1, temp); |
5314 | POSTING_READ(SOUTH_CHICKEN1); |
5314 | POSTING_READ(SOUTH_CHICKEN1); |
5315 | } |
5315 | } |
5316 | 5316 | ||
/*
 * Validate (and if necessary clamp) the FDI lane count chosen for
 * @intel_crtc against the hardware's lane-sharing constraints.
 *
 * Returns true if the configuration is usable as-is; false if the
 * lane count was invalid and has been clamped, or if pipe C cannot
 * be driven at all because pipe B holds too many lanes.
 * NOTE(review): on a false return fdi_lanes may have been rewritten —
 * confirm the caller re-runs the config with the clamped value.
 *
 * Side effect: enables FDI B/C bifurcation when the sharing rules
 * require it.
 */
static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);

	DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
		      intel_crtc->pipe, intel_crtc->fdi_lanes);
	/* 4 lanes is the absolute hardware maximum for any pipe. */
	if (intel_crtc->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
			      intel_crtc->pipe, intel_crtc->fdi_lanes);
		/* Clamp lanes to avoid programming the hw with bogus values. */
		intel_crtc->fdi_lanes = 4;

		return false;
	}

	/* With only two pipes there is no B/C lane sharing to check. */
	if (dev_priv->num_pipe == 2)
		return true;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		/* Pipe A has its own dedicated FDI link. */
		return true;
	case PIPE_B:
		/* Pipe B may use >2 lanes only if pipe C is not enabled. */
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
		    intel_crtc->fdi_lanes > 2) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
				      intel_crtc->pipe, intel_crtc->fdi_lanes);
			/* Clamp lanes to avoid programming the hw with bogus values. */
			intel_crtc->fdi_lanes = 2;

			return false;
		}

		if (intel_crtc->fdi_lanes > 2)
			/* >2 lanes requires bifurcation to be OFF. */
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
		else
			/* <=2 lanes: split the link so pipe C can run too. */
			cpt_enable_fdi_bc_bifurcation(dev);

		return true;
	case PIPE_C:
		/* Pipe C only gets lanes if pipe B leaves at least two free. */
		if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
			if (intel_crtc->fdi_lanes > 2) {
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
					      intel_crtc->pipe, intel_crtc->fdi_lanes);
				/* Clamp lanes to avoid programming the hw with bogus values. */
				intel_crtc->fdi_lanes = 2;

				return false;
			}
		} else {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return false;
		}

		cpt_enable_fdi_bc_bifurcation(dev);

		return true;
	default:
		/* Only pipes A-C exist on this hardware. */
		BUG();
	}
}
5380 | 5380 | ||
/*
 * Return the number of FDI lanes needed to carry @target_clock pixels
 * at @bpp bits each over lanes of @link_bw bandwidth.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Pad the required bandwidth by 5% so spread spectrum cannot
	 * oversubscribe the link: max center spread is 2.5%, doubled
	 * for safety's sake.
	 */
	uint32_t padded_bps = target_clock * bpp * 21 / 20;
	uint32_t per_lane_bw = link_bw * 8;

	return padded_bps / per_lane_bw + 1;
}
5391 | 5391 | ||
/*
 * Compute and program the data/link M/N ratios for this pipe's
 * (cpu transcoder's) FDI or eDP link.
 *
 * For CPU eDP the lane count and link bandwidth come from the current
 * DP link configuration; otherwise the FDI link bandwidth is derived
 * from the FDI frequency and the minimum lane count is computed from
 * the target pixel bandwidth.  Also records the chosen lane count in
 * intel_crtc->fdi_lanes as a side effect.
 */
static void ironlake_set_m_n(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
	struct intel_encoder *intel_encoder, *edp_encoder = NULL;
	struct intel_link_m_n m_n = {0};
	int target_clock, pixel_multiplier, lane, link_bw;
	bool is_dp = false, is_cpu_edp = false;

	/* Find any DP/eDP encoder on this crtc. */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			is_dp = true;
			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
				is_cpu_edp = true;
			edp_encoder = intel_encoder;
			break;
		}
	}

	/* FDI link */
	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
	lane = 0;	/* 0 = "compute from bandwidth" below */
	/* CPU eDP doesn't require FDI link, so just set DP M/N
	   according to current link config */
	if (is_cpu_edp) {
		intel_edp_link_config(edp_encoder, &lane, &link_bw);
	} else {
		/* FDI is a binary signal running at ~2.7GHz, encoding
		 * each output octet as 10 bits. The actual frequency
		 * is stored as a divider into a 100MHz clock, and the
		 * mode pixel clock is stored in units of 1KHz.
		 * Hence the bw of each lane in terms of the mode signal
		 * is:
		 */
		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
	}

	/* [e]DP over FDI requires target mode clock instead of link clock. */
	if (edp_encoder)
		target_clock = intel_edp_target_clock(edp_encoder, mode);
	else if (is_dp)
		target_clock = mode->clock;
	else
		target_clock = adjusted_mode->clock;

	/* No lane count fixed by eDP config: pick the minimum that fits. */
	if (!lane)
		lane = ironlake_get_lanes_required(target_clock, link_bw,
						   intel_crtc->bpp);

	intel_crtc->fdi_lanes = lane;

	if (pixel_multiplier > 1)
		link_bw *= pixel_multiplier;
	intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);

	/* Program data M/N and link M/N for the transcoder. */
	I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
	I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
	I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
	I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
}
5460 | 5460 | ||
/*
 * Build the DPLL register value for @adjusted_mode/@clock on an
 * Ironlake-class PCH PLL and return it.
 *
 * NOTE(review): @fp is passed BY VALUE, so the `fp |= FP_CB_TUNE`
 * below never reaches the caller and the CB tune bit is silently
 * dropped — later upstream kernels changed this parameter to `u32 *fp`
 * for exactly this reason.  Flagging rather than fixing here because
 * changing the signature would break the caller.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      struct drm_display_mode *adjusted_mode,
				      intel_clock_t *clock, u32 fp)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	uint32_t dpll;
	int factor, pixel_multiplier, num_connectors = 0;
	bool is_lvds = false, is_sdvo = false, is_tv = false;
	bool is_dp = false, is_cpu_edp = false;

	/* Classify outputs; num_connectors counts encoders on this crtc. */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (intel_encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		case INTEL_OUTPUT_EDP:
			is_dp = true;
			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
				is_cpu_edp = true;
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->lvds_ssc_freq == 100) ||
		    intel_is_dual_link_lvds(dev))
			factor = 25;
	} else if (is_sdvo && is_tv)
		factor = 20;

	/* See NOTE(review) above: this write is lost (by-value param). */
	if (clock->m < factor * clock->n)
		fp |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	if (is_dp && !is_cpu_edp)
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	/* Encode the post divisor p2. */
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Select the PLL reference input. */
	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll;
}
5563 | 5563 | ||
5564 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
5564 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
5565 | struct drm_display_mode *mode, |
5565 | struct drm_display_mode *mode, |
5566 | struct drm_display_mode *adjusted_mode, |
5566 | struct drm_display_mode *adjusted_mode, |
5567 | int x, int y, |
5567 | int x, int y, |
5568 | struct drm_framebuffer *fb) |
5568 | struct drm_framebuffer *fb) |
5569 | { |
5569 | { |
5570 | struct drm_device *dev = crtc->dev; |
5570 | struct drm_device *dev = crtc->dev; |
5571 | struct drm_i915_private *dev_priv = dev->dev_private; |
5571 | struct drm_i915_private *dev_priv = dev->dev_private; |
5572 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5572 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5573 | int pipe = intel_crtc->pipe; |
5573 | int pipe = intel_crtc->pipe; |
5574 | int plane = intel_crtc->plane; |
5574 | int plane = intel_crtc->plane; |
5575 | int num_connectors = 0; |
5575 | int num_connectors = 0; |
5576 | intel_clock_t clock, reduced_clock; |
5576 | intel_clock_t clock, reduced_clock; |
5577 | u32 dpll, fp = 0, fp2 = 0; |
5577 | u32 dpll, fp = 0, fp2 = 0; |
5578 | bool ok, has_reduced_clock = false; |
5578 | bool ok, has_reduced_clock = false; |
5579 | bool is_lvds = false, is_dp = false, is_cpu_edp = false; |
5579 | bool is_lvds = false, is_dp = false, is_cpu_edp = false; |
5580 | struct intel_encoder *encoder; |
5580 | struct intel_encoder *encoder; |
5581 | int ret; |
5581 | int ret; |
5582 | bool dither, fdi_config_ok; |
5582 | bool dither, fdi_config_ok; |
5583 | 5583 | ||
5584 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
5584 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
5585 | switch (encoder->type) { |
5585 | switch (encoder->type) { |
5586 | case INTEL_OUTPUT_LVDS: |
5586 | case INTEL_OUTPUT_LVDS: |
5587 | is_lvds = true; |
5587 | is_lvds = true; |
5588 | break; |
5588 | break; |
5589 | case INTEL_OUTPUT_DISPLAYPORT: |
5589 | case INTEL_OUTPUT_DISPLAYPORT: |
5590 | is_dp = true; |
5590 | is_dp = true; |
5591 | break; |
5591 | break; |
5592 | case INTEL_OUTPUT_EDP: |
5592 | case INTEL_OUTPUT_EDP: |
5593 | is_dp = true; |
5593 | is_dp = true; |
5594 | if (!intel_encoder_is_pch_edp(&encoder->base)) |
5594 | if (!intel_encoder_is_pch_edp(&encoder->base)) |
5595 | is_cpu_edp = true; |
5595 | is_cpu_edp = true; |
5596 | break; |
5596 | break; |
5597 | } |
5597 | } |
5598 | 5598 | ||
5599 | num_connectors++; |
5599 | num_connectors++; |
5600 | } |
5600 | } |
5601 | 5601 | ||
5602 | WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), |
5602 | WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), |
5603 | "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); |
5603 | "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); |
5604 | 5604 | ||
5605 | ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, |
5605 | ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, |
5606 | &has_reduced_clock, &reduced_clock); |
5606 | &has_reduced_clock, &reduced_clock); |
5607 | if (!ok) { |
5607 | if (!ok) { |
5608 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
5608 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
5609 | return -EINVAL; |
5609 | return -EINVAL; |
5610 | } |
5610 | } |
5611 | 5611 | ||
5612 | /* Ensure that the cursor is valid for the new mode before changing... */ |
5612 | /* Ensure that the cursor is valid for the new mode before changing... */ |
5613 | // intel_crtc_update_cursor(crtc, true); |
5613 | // intel_crtc_update_cursor(crtc, true); |
5614 | 5614 | ||
5615 | /* determine panel color depth */ |
5615 | /* determine panel color depth */ |
5616 | dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, |
5616 | dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, |
5617 | adjusted_mode); |
5617 | adjusted_mode); |
5618 | if (is_lvds && dev_priv->lvds_dither) |
5618 | if (is_lvds && dev_priv->lvds_dither) |
5619 | dither = true; |
5619 | dither = true; |
5620 | 5620 | ||
5621 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
5621 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
5622 | if (has_reduced_clock) |
5622 | if (has_reduced_clock) |
5623 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
5623 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
5624 | reduced_clock.m2; |
5624 | reduced_clock.m2; |
5625 | 5625 | ||
5626 | dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp); |
5626 | dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp); |
5627 | 5627 | ||
5628 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
5628 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
5629 | drm_mode_debug_printmodeline(mode); |
5629 | drm_mode_debug_printmodeline(mode); |
5630 | 5630 | ||
5631 | /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ |
5631 | /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ |
5632 | if (!is_cpu_edp) { |
5632 | if (!is_cpu_edp) { |
5633 | struct intel_pch_pll *pll; |
5633 | struct intel_pch_pll *pll; |
5634 | 5634 | ||
5635 | pll = intel_get_pch_pll(intel_crtc, dpll, fp); |
5635 | pll = intel_get_pch_pll(intel_crtc, dpll, fp); |
5636 | if (pll == NULL) { |
5636 | if (pll == NULL) { |
5637 | DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", |
5637 | DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", |
5638 | pipe); |
5638 | pipe); |
5639 | return -EINVAL; |
5639 | return -EINVAL; |
5640 | } |
5640 | } |
5641 | } else |
5641 | } else |
5642 | intel_put_pch_pll(intel_crtc); |
5642 | intel_put_pch_pll(intel_crtc); |
5643 | 5643 | ||
5644 | if (is_dp && !is_cpu_edp) |
5644 | if (is_dp && !is_cpu_edp) |
5645 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
5645 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
5646 | 5646 | ||
5647 | for_each_encoder_on_crtc(dev, crtc, encoder) |
5647 | for_each_encoder_on_crtc(dev, crtc, encoder) |
5648 | if (encoder->pre_pll_enable) |
5648 | if (encoder->pre_pll_enable) |
5649 | encoder->pre_pll_enable(encoder); |
5649 | encoder->pre_pll_enable(encoder); |
5650 | 5650 | ||
5651 | if (intel_crtc->pch_pll) { |
5651 | if (intel_crtc->pch_pll) { |
5652 | I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); |
5652 | I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); |
5653 | 5653 | ||
5654 | /* Wait for the clocks to stabilize. */ |
5654 | /* Wait for the clocks to stabilize. */ |
5655 | POSTING_READ(intel_crtc->pch_pll->pll_reg); |
5655 | POSTING_READ(intel_crtc->pch_pll->pll_reg); |
5656 | udelay(150); |
5656 | udelay(150); |
5657 | 5657 | ||
5658 | /* The pixel multiplier can only be updated once the |
5658 | /* The pixel multiplier can only be updated once the |
5659 | * DPLL is enabled and the clocks are stable. |
5659 | * DPLL is enabled and the clocks are stable. |
5660 | * |
5660 | * |
5661 | * So write it again. |
5661 | * So write it again. |
5662 | */ |
5662 | */ |
5663 | I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); |
5663 | I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); |
5664 | } |
5664 | } |
5665 | 5665 | ||
5666 | intel_crtc->lowfreq_avail = false; |
5666 | intel_crtc->lowfreq_avail = false; |
5667 | if (intel_crtc->pch_pll) { |
5667 | if (intel_crtc->pch_pll) { |
5668 | if (is_lvds && has_reduced_clock && i915_powersave) { |
5668 | if (is_lvds && has_reduced_clock && i915_powersave) { |
5669 | I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); |
5669 | I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); |
5670 | intel_crtc->lowfreq_avail = true; |
5670 | intel_crtc->lowfreq_avail = true; |
5671 | } else { |
5671 | } else { |
5672 | I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); |
5672 | I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); |
5673 | } |
5673 | } |
5674 | } |
5674 | } |
5675 | 5675 | ||
5676 | intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
5676 | intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
5677 | 5677 | ||
5678 | /* Note, this also computes intel_crtc->fdi_lanes which is used below in |
5678 | /* Note, this also computes intel_crtc->fdi_lanes which is used below in |
5679 | * ironlake_check_fdi_lanes. */ |
5679 | * ironlake_check_fdi_lanes. */ |
5680 | ironlake_set_m_n(crtc, mode, adjusted_mode); |
5680 | ironlake_set_m_n(crtc, mode, adjusted_mode); |
5681 | 5681 | ||
5682 | fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); |
5682 | fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); |
5683 | 5683 | ||
5684 | ironlake_set_pipeconf(crtc, adjusted_mode, dither); |
5684 | ironlake_set_pipeconf(crtc, adjusted_mode, dither); |
5685 | 5685 | ||
5686 | intel_wait_for_vblank(dev, pipe); |
5686 | intel_wait_for_vblank(dev, pipe); |
5687 | 5687 | ||
5688 | /* Set up the display plane register */ |
5688 | /* Set up the display plane register */ |
5689 | I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); |
5689 | I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); |
5690 | POSTING_READ(DSPCNTR(plane)); |
5690 | POSTING_READ(DSPCNTR(plane)); |
5691 | 5691 | ||
5692 | ret = intel_pipe_set_base(crtc, x, y, fb); |
5692 | ret = intel_pipe_set_base(crtc, x, y, fb); |
5693 | 5693 | ||
5694 | intel_update_watermarks(dev); |
5694 | intel_update_watermarks(dev); |
5695 | 5695 | ||
5696 | intel_update_linetime_watermarks(dev, pipe, adjusted_mode); |
5696 | intel_update_linetime_watermarks(dev, pipe, adjusted_mode); |
5697 | 5697 | ||
5698 | return fdi_config_ok ? ret : -EINVAL; |
5698 | return fdi_config_ok ? ret : -EINVAL; |
5699 | } |
5699 | } |
5700 | 5700 | ||
5701 | static void haswell_modeset_global_resources(struct drm_device *dev) |
5701 | static void haswell_modeset_global_resources(struct drm_device *dev) |
5702 | { |
5702 | { |
5703 | struct drm_i915_private *dev_priv = dev->dev_private; |
5703 | struct drm_i915_private *dev_priv = dev->dev_private; |
5704 | bool enable = false; |
5704 | bool enable = false; |
5705 | struct intel_crtc *crtc; |
5705 | struct intel_crtc *crtc; |
5706 | struct intel_encoder *encoder; |
5706 | struct intel_encoder *encoder; |
5707 | 5707 | ||
5708 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { |
5708 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { |
5709 | if (crtc->pipe != PIPE_A && crtc->base.enabled) |
5709 | if (crtc->pipe != PIPE_A && crtc->base.enabled) |
5710 | enable = true; |
5710 | enable = true; |
5711 | /* XXX: Should check for edp transcoder here, but thanks to init |
5711 | /* XXX: Should check for edp transcoder here, but thanks to init |
5712 | * sequence that's not yet available. Just in case desktop eDP |
5712 | * sequence that's not yet available. Just in case desktop eDP |
5713 | * on PORT D is possible on haswell, too. */ |
5713 | * on PORT D is possible on haswell, too. */ |
5714 | } |
5714 | } |
5715 | 5715 | ||
5716 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
5716 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
5717 | base.head) { |
5717 | base.head) { |
5718 | if (encoder->type != INTEL_OUTPUT_EDP && |
5718 | if (encoder->type != INTEL_OUTPUT_EDP && |
5719 | encoder->connectors_active) |
5719 | encoder->connectors_active) |
5720 | enable = true; |
5720 | enable = true; |
5721 | } |
5721 | } |
5722 | 5722 | ||
5723 | /* Even the eDP panel fitter is outside the always-on well. */ |
5723 | /* Even the eDP panel fitter is outside the always-on well. */ |
5724 | if (dev_priv->pch_pf_size) |
5724 | if (dev_priv->pch_pf_size) |
5725 | enable = true; |
5725 | enable = true; |
5726 | 5726 | ||
5727 | intel_set_power_well(dev, enable); |
5727 | intel_set_power_well(dev, enable); |
5728 | } |
5728 | } |
5729 | 5729 | ||
5730 | static int haswell_crtc_mode_set(struct drm_crtc *crtc, |
5730 | static int haswell_crtc_mode_set(struct drm_crtc *crtc, |
5731 | struct drm_display_mode *mode, |
5731 | struct drm_display_mode *mode, |
5732 | struct drm_display_mode *adjusted_mode, |
5732 | struct drm_display_mode *adjusted_mode, |
5733 | int x, int y, |
5733 | int x, int y, |
5734 | struct drm_framebuffer *fb) |
5734 | struct drm_framebuffer *fb) |
5735 | { |
5735 | { |
5736 | struct drm_device *dev = crtc->dev; |
5736 | struct drm_device *dev = crtc->dev; |
5737 | struct drm_i915_private *dev_priv = dev->dev_private; |
5737 | struct drm_i915_private *dev_priv = dev->dev_private; |
5738 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5738 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5739 | int pipe = intel_crtc->pipe; |
5739 | int pipe = intel_crtc->pipe; |
5740 | int plane = intel_crtc->plane; |
5740 | int plane = intel_crtc->plane; |
5741 | int num_connectors = 0; |
5741 | int num_connectors = 0; |
5742 | bool is_dp = false, is_cpu_edp = false; |
5742 | bool is_dp = false, is_cpu_edp = false; |
5743 | struct intel_encoder *encoder; |
5743 | struct intel_encoder *encoder; |
5744 | int ret; |
5744 | int ret; |
5745 | bool dither; |
5745 | bool dither; |
5746 | 5746 | ||
5747 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
5747 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
5748 | switch (encoder->type) { |
5748 | switch (encoder->type) { |
5749 | case INTEL_OUTPUT_DISPLAYPORT: |
5749 | case INTEL_OUTPUT_DISPLAYPORT: |
5750 | is_dp = true; |
5750 | is_dp = true; |
5751 | break; |
5751 | break; |
5752 | case INTEL_OUTPUT_EDP: |
5752 | case INTEL_OUTPUT_EDP: |
5753 | is_dp = true; |
5753 | is_dp = true; |
5754 | if (!intel_encoder_is_pch_edp(&encoder->base)) |
5754 | if (!intel_encoder_is_pch_edp(&encoder->base)) |
5755 | is_cpu_edp = true; |
5755 | is_cpu_edp = true; |
5756 | break; |
5756 | break; |
5757 | } |
5757 | } |
5758 | 5758 | ||
5759 | num_connectors++; |
5759 | num_connectors++; |
5760 | } |
5760 | } |
5761 | 5761 | ||
5762 | if (is_cpu_edp) |
5762 | if (is_cpu_edp) |
5763 | intel_crtc->cpu_transcoder = TRANSCODER_EDP; |
5763 | intel_crtc->cpu_transcoder = TRANSCODER_EDP; |
5764 | else |
5764 | else |
5765 | intel_crtc->cpu_transcoder = pipe; |
5765 | intel_crtc->cpu_transcoder = pipe; |
5766 | 5766 | ||
5767 | /* We are not sure yet this won't happen. */ |
5767 | /* We are not sure yet this won't happen. */ |
5768 | WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", |
5768 | WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", |
5769 | INTEL_PCH_TYPE(dev)); |
5769 | INTEL_PCH_TYPE(dev)); |
5770 | 5770 | ||
5771 | WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", |
5771 | WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", |
5772 | num_connectors, pipe_name(pipe)); |
5772 | num_connectors, pipe_name(pipe)); |
5773 | 5773 | ||
5774 | WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) & |
5774 | WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) & |
5775 | (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE)); |
5775 | (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE)); |
5776 | 5776 | ||
5777 | WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE); |
5777 | WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE); |
5778 | 5778 | ||
5779 | if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) |
5779 | if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) |
5780 | return -EINVAL; |
5780 | return -EINVAL; |
5781 | 5781 | ||
5782 | /* Ensure that the cursor is valid for the new mode before changing... */ |
5782 | /* Ensure that the cursor is valid for the new mode before changing... */ |
5783 | // intel_crtc_update_cursor(crtc, true); |
5783 | // intel_crtc_update_cursor(crtc, true); |
5784 | 5784 | ||
5785 | /* determine panel color depth */ |
5785 | /* determine panel color depth */ |
5786 | dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, |
5786 | dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, |
5787 | adjusted_mode); |
5787 | adjusted_mode); |
5788 | 5788 | ||
5789 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
5789 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
5790 | drm_mode_debug_printmodeline(mode); |
5790 | drm_mode_debug_printmodeline(mode); |
5791 | 5791 | ||
5792 | if (is_dp && !is_cpu_edp) |
5792 | if (is_dp && !is_cpu_edp) |
5793 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
5793 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
5794 | 5794 | ||
5795 | intel_crtc->lowfreq_avail = false; |
5795 | intel_crtc->lowfreq_avail = false; |
5796 | 5796 | ||
5797 | intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
5797 | intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
5798 | 5798 | ||
5799 | if (!is_dp || is_cpu_edp) |
5799 | if (!is_dp || is_cpu_edp) |
5800 | ironlake_set_m_n(crtc, mode, adjusted_mode); |
5800 | ironlake_set_m_n(crtc, mode, adjusted_mode); |
5801 | 5801 | ||
5802 | haswell_set_pipeconf(crtc, adjusted_mode, dither); |
5802 | haswell_set_pipeconf(crtc, adjusted_mode, dither); |
5803 | 5803 | ||
5804 | intel_set_pipe_csc(crtc, adjusted_mode); |
5804 | intel_set_pipe_csc(crtc, adjusted_mode); |
5805 | 5805 | ||
5806 | /* Set up the display plane register */ |
5806 | /* Set up the display plane register */ |
5807 | I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE); |
5807 | I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE); |
5808 | POSTING_READ(DSPCNTR(plane)); |
5808 | POSTING_READ(DSPCNTR(plane)); |
5809 | 5809 | ||
5810 | ret = intel_pipe_set_base(crtc, x, y, fb); |
5810 | ret = intel_pipe_set_base(crtc, x, y, fb); |
5811 | 5811 | ||
5812 | intel_update_watermarks(dev); |
5812 | intel_update_watermarks(dev); |
5813 | 5813 | ||
5814 | intel_update_linetime_watermarks(dev, pipe, adjusted_mode); |
5814 | intel_update_linetime_watermarks(dev, pipe, adjusted_mode); |
5815 | 5815 | ||
5816 | return ret; |
5816 | return ret; |
5817 | } |
5817 | } |
5818 | 5818 | ||
5819 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
5819 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
5820 | struct drm_display_mode *mode, |
5820 | struct drm_display_mode *mode, |
5821 | struct drm_display_mode *adjusted_mode, |
5821 | struct drm_display_mode *adjusted_mode, |
5822 | int x, int y, |
5822 | int x, int y, |
5823 | struct drm_framebuffer *fb) |
5823 | struct drm_framebuffer *fb) |
5824 | { |
5824 | { |
5825 | struct drm_device *dev = crtc->dev; |
5825 | struct drm_device *dev = crtc->dev; |
5826 | struct drm_i915_private *dev_priv = dev->dev_private; |
5826 | struct drm_i915_private *dev_priv = dev->dev_private; |
5827 | struct drm_encoder_helper_funcs *encoder_funcs; |
5827 | struct drm_encoder_helper_funcs *encoder_funcs; |
5828 | struct intel_encoder *encoder; |
5828 | struct intel_encoder *encoder; |
5829 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5829 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5830 | int pipe = intel_crtc->pipe; |
5830 | int pipe = intel_crtc->pipe; |
5831 | int ret; |
5831 | int ret; |
5832 | 5832 | ||
5833 | drm_vblank_pre_modeset(dev, pipe); |
5833 | drm_vblank_pre_modeset(dev, pipe); |
5834 | 5834 | ||
5835 | ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, |
5835 | ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, |
5836 | x, y, fb); |
5836 | x, y, fb); |
5837 | drm_vblank_post_modeset(dev, pipe); |
5837 | drm_vblank_post_modeset(dev, pipe); |
5838 | 5838 | ||
5839 | if (ret != 0) |
5839 | if (ret != 0) |
5840 | return ret; |
5840 | return ret; |
5841 | 5841 | ||
5842 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
5842 | for_each_encoder_on_crtc(dev, crtc, encoder) { |
5843 | DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", |
5843 | DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", |
5844 | encoder->base.base.id, |
5844 | encoder->base.base.id, |
5845 | drm_get_encoder_name(&encoder->base), |
5845 | drm_get_encoder_name(&encoder->base), |
5846 | mode->base.id, mode->name); |
5846 | mode->base.id, mode->name); |
5847 | encoder_funcs = encoder->base.helper_private; |
5847 | encoder_funcs = encoder->base.helper_private; |
5848 | encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode); |
5848 | encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode); |
5849 | } |
5849 | } |
5850 | 5850 | ||
5851 | return 0; |
5851 | return 0; |
5852 | } |
5852 | } |
5853 | 5853 | ||
5854 | static bool intel_eld_uptodate(struct drm_connector *connector, |
5854 | static bool intel_eld_uptodate(struct drm_connector *connector, |
5855 | int reg_eldv, uint32_t bits_eldv, |
5855 | int reg_eldv, uint32_t bits_eldv, |
5856 | int reg_elda, uint32_t bits_elda, |
5856 | int reg_elda, uint32_t bits_elda, |
5857 | int reg_edid) |
5857 | int reg_edid) |
5858 | { |
5858 | { |
5859 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
5859 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
5860 | uint8_t *eld = connector->eld; |
5860 | uint8_t *eld = connector->eld; |
5861 | uint32_t i; |
5861 | uint32_t i; |
5862 | 5862 | ||
5863 | i = I915_READ(reg_eldv); |
5863 | i = I915_READ(reg_eldv); |
5864 | i &= bits_eldv; |
5864 | i &= bits_eldv; |
5865 | 5865 | ||
5866 | if (!eld[0]) |
5866 | if (!eld[0]) |
5867 | return !i; |
5867 | return !i; |
5868 | 5868 | ||
5869 | if (!i) |
5869 | if (!i) |
5870 | return false; |
5870 | return false; |
5871 | 5871 | ||
5872 | i = I915_READ(reg_elda); |
5872 | i = I915_READ(reg_elda); |
5873 | i &= ~bits_elda; |
5873 | i &= ~bits_elda; |
5874 | I915_WRITE(reg_elda, i); |
5874 | I915_WRITE(reg_elda, i); |
5875 | 5875 | ||
5876 | for (i = 0; i < eld[2]; i++) |
5876 | for (i = 0; i < eld[2]; i++) |
5877 | if (I915_READ(reg_edid) != *((uint32_t *)eld + i)) |
5877 | if (I915_READ(reg_edid) != *((uint32_t *)eld + i)) |
5878 | return false; |
5878 | return false; |
5879 | 5879 | ||
5880 | return true; |
5880 | return true; |
5881 | } |
5881 | } |
5882 | 5882 | ||
5883 | static void g4x_write_eld(struct drm_connector *connector, |
5883 | static void g4x_write_eld(struct drm_connector *connector, |
5884 | struct drm_crtc *crtc) |
5884 | struct drm_crtc *crtc) |
5885 | { |
5885 | { |
5886 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
5886 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
5887 | uint8_t *eld = connector->eld; |
5887 | uint8_t *eld = connector->eld; |
5888 | uint32_t eldv; |
5888 | uint32_t eldv; |
5889 | uint32_t len; |
5889 | uint32_t len; |
5890 | uint32_t i; |
5890 | uint32_t i; |
5891 | 5891 | ||
5892 | i = I915_READ(G4X_AUD_VID_DID); |
5892 | i = I915_READ(G4X_AUD_VID_DID); |
5893 | 5893 | ||
5894 | if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL) |
5894 | if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL) |
5895 | eldv = G4X_ELDV_DEVCL_DEVBLC; |
5895 | eldv = G4X_ELDV_DEVCL_DEVBLC; |
5896 | else |
5896 | else |
5897 | eldv = G4X_ELDV_DEVCTG; |
5897 | eldv = G4X_ELDV_DEVCTG; |
5898 | 5898 | ||
5899 | if (intel_eld_uptodate(connector, |
5899 | if (intel_eld_uptodate(connector, |
5900 | G4X_AUD_CNTL_ST, eldv, |
5900 | G4X_AUD_CNTL_ST, eldv, |
5901 | G4X_AUD_CNTL_ST, G4X_ELD_ADDR, |
5901 | G4X_AUD_CNTL_ST, G4X_ELD_ADDR, |
5902 | G4X_HDMIW_HDMIEDID)) |
5902 | G4X_HDMIW_HDMIEDID)) |
5903 | return; |
5903 | return; |
5904 | 5904 | ||
5905 | i = I915_READ(G4X_AUD_CNTL_ST); |
5905 | i = I915_READ(G4X_AUD_CNTL_ST); |
5906 | i &= ~(eldv | G4X_ELD_ADDR); |
5906 | i &= ~(eldv | G4X_ELD_ADDR); |
5907 | len = (i >> 9) & 0x1f; /* ELD buffer size */ |
5907 | len = (i >> 9) & 0x1f; /* ELD buffer size */ |
5908 | I915_WRITE(G4X_AUD_CNTL_ST, i); |
5908 | I915_WRITE(G4X_AUD_CNTL_ST, i); |
5909 | 5909 | ||
5910 | if (!eld[0]) |
5910 | if (!eld[0]) |
5911 | return; |
5911 | return; |
5912 | 5912 | ||
5913 | len = min_t(uint8_t, eld[2], len); |
5913 | len = min_t(uint8_t, eld[2], len); |
5914 | DRM_DEBUG_DRIVER("ELD size %d\n", len); |
5914 | DRM_DEBUG_DRIVER("ELD size %d\n", len); |
5915 | for (i = 0; i < len; i++) |
5915 | for (i = 0; i < len; i++) |
5916 | I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i)); |
5916 | I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i)); |
5917 | 5917 | ||
5918 | i = I915_READ(G4X_AUD_CNTL_ST); |
5918 | i = I915_READ(G4X_AUD_CNTL_ST); |
5919 | i |= eldv; |
5919 | i |= eldv; |
5920 | I915_WRITE(G4X_AUD_CNTL_ST, i); |
5920 | I915_WRITE(G4X_AUD_CNTL_ST, i); |
5921 | } |
5921 | } |
5922 | 5922 | ||
5923 | static void haswell_write_eld(struct drm_connector *connector, |
5923 | static void haswell_write_eld(struct drm_connector *connector, |
5924 | struct drm_crtc *crtc) |
5924 | struct drm_crtc *crtc) |
5925 | { |
5925 | { |
5926 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
5926 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
5927 | uint8_t *eld = connector->eld; |
5927 | uint8_t *eld = connector->eld; |
5928 | struct drm_device *dev = crtc->dev; |
5928 | struct drm_device *dev = crtc->dev; |
5929 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5929 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5930 | uint32_t eldv; |
5930 | uint32_t eldv; |
5931 | uint32_t i; |
5931 | uint32_t i; |
5932 | int len; |
5932 | int len; |
5933 | int pipe = to_intel_crtc(crtc)->pipe; |
5933 | int pipe = to_intel_crtc(crtc)->pipe; |
5934 | int tmp; |
5934 | int tmp; |
5935 | 5935 | ||
5936 | int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe); |
5936 | int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe); |
5937 | int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe); |
5937 | int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe); |
5938 | int aud_config = HSW_AUD_CFG(pipe); |
5938 | int aud_config = HSW_AUD_CFG(pipe); |
5939 | int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD; |
5939 | int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD; |
5940 | 5940 | ||
5941 | 5941 | ||
5942 | DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n"); |
5942 | DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n"); |
5943 | 5943 | ||
5944 | /* Audio output enable */ |
5944 | /* Audio output enable */ |
5945 | DRM_DEBUG_DRIVER("HDMI audio: enable codec\n"); |
5945 | DRM_DEBUG_DRIVER("HDMI audio: enable codec\n"); |
5946 | tmp = I915_READ(aud_cntrl_st2); |
5946 | tmp = I915_READ(aud_cntrl_st2); |
5947 | tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4)); |
5947 | tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4)); |
5948 | I915_WRITE(aud_cntrl_st2, tmp); |
5948 | I915_WRITE(aud_cntrl_st2, tmp); |
5949 | 5949 | ||
5950 | /* Wait for 1 vertical blank */ |
5950 | /* Wait for 1 vertical blank */ |
5951 | intel_wait_for_vblank(dev, pipe); |
5951 | intel_wait_for_vblank(dev, pipe); |
5952 | 5952 | ||
5953 | /* Set ELD valid state */ |
5953 | /* Set ELD valid state */ |
5954 | tmp = I915_READ(aud_cntrl_st2); |
5954 | tmp = I915_READ(aud_cntrl_st2); |
5955 | DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp); |
5955 | DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp); |
5956 | tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); |
5956 | tmp |= (AUDIO_ELD_VALID_A << (pipe * 4)); |
5957 | I915_WRITE(aud_cntrl_st2, tmp); |
5957 | I915_WRITE(aud_cntrl_st2, tmp); |
5958 | tmp = I915_READ(aud_cntrl_st2); |
5958 | tmp = I915_READ(aud_cntrl_st2); |
5959 | DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp); |
5959 | DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp); |
5960 | 5960 | ||
5961 | /* Enable HDMI mode */ |
5961 | /* Enable HDMI mode */ |
5962 | tmp = I915_READ(aud_config); |
5962 | tmp = I915_READ(aud_config); |
5963 | DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp); |
5963 | DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp); |
5964 | /* clear N_programing_enable and N_value_index */ |
5964 | /* clear N_programing_enable and N_value_index */ |
5965 | tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); |
5965 | tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE); |
5966 | I915_WRITE(aud_config, tmp); |
5966 | I915_WRITE(aud_config, tmp); |
5967 | 5967 | ||
5968 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); |
5968 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); |
5969 | 5969 | ||
5970 | eldv = AUDIO_ELD_VALID_A << (pipe * 4); |
5970 | eldv = AUDIO_ELD_VALID_A << (pipe * 4); |
5971 | intel_crtc->eld_vld = true; |
5971 | intel_crtc->eld_vld = true; |
5972 | 5972 | ||
5973 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
5973 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
5974 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); |
5974 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); |
5975 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ |
5975 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ |
5976 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ |
5976 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ |
5977 | } else |
5977 | } else |
5978 | I915_WRITE(aud_config, 0); |
5978 | I915_WRITE(aud_config, 0); |
5979 | 5979 | ||
5980 | if (intel_eld_uptodate(connector, |
5980 | if (intel_eld_uptodate(connector, |
5981 | aud_cntrl_st2, eldv, |
5981 | aud_cntrl_st2, eldv, |
5982 | aud_cntl_st, IBX_ELD_ADDRESS, |
5982 | aud_cntl_st, IBX_ELD_ADDRESS, |
5983 | hdmiw_hdmiedid)) |
5983 | hdmiw_hdmiedid)) |
5984 | return; |
5984 | return; |
5985 | 5985 | ||
5986 | i = I915_READ(aud_cntrl_st2); |
5986 | i = I915_READ(aud_cntrl_st2); |
5987 | i &= ~eldv; |
5987 | i &= ~eldv; |
5988 | I915_WRITE(aud_cntrl_st2, i); |
5988 | I915_WRITE(aud_cntrl_st2, i); |
5989 | 5989 | ||
5990 | if (!eld[0]) |
5990 | if (!eld[0]) |
5991 | return; |
5991 | return; |
5992 | 5992 | ||
5993 | i = I915_READ(aud_cntl_st); |
5993 | i = I915_READ(aud_cntl_st); |
5994 | i &= ~IBX_ELD_ADDRESS; |
5994 | i &= ~IBX_ELD_ADDRESS; |
5995 | I915_WRITE(aud_cntl_st, i); |
5995 | I915_WRITE(aud_cntl_st, i); |
5996 | i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ |
5996 | i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ |
5997 | DRM_DEBUG_DRIVER("port num:%d\n", i); |
5997 | DRM_DEBUG_DRIVER("port num:%d\n", i); |
5998 | 5998 | ||
5999 | len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ |
5999 | len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ |
6000 | DRM_DEBUG_DRIVER("ELD size %d\n", len); |
6000 | DRM_DEBUG_DRIVER("ELD size %d\n", len); |
6001 | for (i = 0; i < len; i++) |
6001 | for (i = 0; i < len; i++) |
6002 | I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); |
6002 | I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); |
6003 | 6003 | ||
6004 | i = I915_READ(aud_cntrl_st2); |
6004 | i = I915_READ(aud_cntrl_st2); |
6005 | i |= eldv; |
6005 | i |= eldv; |
6006 | I915_WRITE(aud_cntrl_st2, i); |
6006 | I915_WRITE(aud_cntrl_st2, i); |
6007 | 6007 | ||
6008 | } |
6008 | } |
6009 | 6009 | ||
6010 | static void ironlake_write_eld(struct drm_connector *connector, |
6010 | static void ironlake_write_eld(struct drm_connector *connector, |
6011 | struct drm_crtc *crtc) |
6011 | struct drm_crtc *crtc) |
6012 | { |
6012 | { |
6013 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
6013 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
6014 | uint8_t *eld = connector->eld; |
6014 | uint8_t *eld = connector->eld; |
6015 | uint32_t eldv; |
6015 | uint32_t eldv; |
6016 | uint32_t i; |
6016 | uint32_t i; |
6017 | int len; |
6017 | int len; |
6018 | int hdmiw_hdmiedid; |
6018 | int hdmiw_hdmiedid; |
6019 | int aud_config; |
6019 | int aud_config; |
6020 | int aud_cntl_st; |
6020 | int aud_cntl_st; |
6021 | int aud_cntrl_st2; |
6021 | int aud_cntrl_st2; |
6022 | int pipe = to_intel_crtc(crtc)->pipe; |
6022 | int pipe = to_intel_crtc(crtc)->pipe; |
6023 | 6023 | ||
6024 | if (HAS_PCH_IBX(connector->dev)) { |
6024 | if (HAS_PCH_IBX(connector->dev)) { |
6025 | hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe); |
6025 | hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe); |
6026 | aud_config = IBX_AUD_CFG(pipe); |
6026 | aud_config = IBX_AUD_CFG(pipe); |
6027 | aud_cntl_st = IBX_AUD_CNTL_ST(pipe); |
6027 | aud_cntl_st = IBX_AUD_CNTL_ST(pipe); |
6028 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; |
6028 | aud_cntrl_st2 = IBX_AUD_CNTL_ST2; |
6029 | } else { |
6029 | } else { |
6030 | hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); |
6030 | hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe); |
6031 | aud_config = CPT_AUD_CFG(pipe); |
6031 | aud_config = CPT_AUD_CFG(pipe); |
6032 | aud_cntl_st = CPT_AUD_CNTL_ST(pipe); |
6032 | aud_cntl_st = CPT_AUD_CNTL_ST(pipe); |
6033 | aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; |
6033 | aud_cntrl_st2 = CPT_AUD_CNTRL_ST2; |
6034 | } |
6034 | } |
6035 | 6035 | ||
6036 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); |
6036 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); |
6037 | 6037 | ||
6038 | i = I915_READ(aud_cntl_st); |
6038 | i = I915_READ(aud_cntl_st); |
6039 | i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ |
6039 | i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */ |
6040 | if (!i) { |
6040 | if (!i) { |
6041 | DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); |
6041 | DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); |
6042 | /* operate blindly on all ports */ |
6042 | /* operate blindly on all ports */ |
6043 | eldv = IBX_ELD_VALIDB; |
6043 | eldv = IBX_ELD_VALIDB; |
6044 | eldv |= IBX_ELD_VALIDB << 4; |
6044 | eldv |= IBX_ELD_VALIDB << 4; |
6045 | eldv |= IBX_ELD_VALIDB << 8; |
6045 | eldv |= IBX_ELD_VALIDB << 8; |
6046 | } else { |
6046 | } else { |
6047 | DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i); |
6047 | DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i); |
6048 | eldv = IBX_ELD_VALIDB << ((i - 1) * 4); |
6048 | eldv = IBX_ELD_VALIDB << ((i - 1) * 4); |
6049 | } |
6049 | } |
6050 | 6050 | ||
6051 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
6051 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
6052 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); |
6052 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); |
6053 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ |
6053 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ |
6054 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ |
6054 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ |
6055 | } else |
6055 | } else |
6056 | I915_WRITE(aud_config, 0); |
6056 | I915_WRITE(aud_config, 0); |
6057 | 6057 | ||
6058 | if (intel_eld_uptodate(connector, |
6058 | if (intel_eld_uptodate(connector, |
6059 | aud_cntrl_st2, eldv, |
6059 | aud_cntrl_st2, eldv, |
6060 | aud_cntl_st, IBX_ELD_ADDRESS, |
6060 | aud_cntl_st, IBX_ELD_ADDRESS, |
6061 | hdmiw_hdmiedid)) |
6061 | hdmiw_hdmiedid)) |
6062 | return; |
6062 | return; |
6063 | 6063 | ||
6064 | i = I915_READ(aud_cntrl_st2); |
6064 | i = I915_READ(aud_cntrl_st2); |
6065 | i &= ~eldv; |
6065 | i &= ~eldv; |
6066 | I915_WRITE(aud_cntrl_st2, i); |
6066 | I915_WRITE(aud_cntrl_st2, i); |
6067 | 6067 | ||
6068 | if (!eld[0]) |
6068 | if (!eld[0]) |
6069 | return; |
6069 | return; |
6070 | 6070 | ||
6071 | i = I915_READ(aud_cntl_st); |
6071 | i = I915_READ(aud_cntl_st); |
6072 | i &= ~IBX_ELD_ADDRESS; |
6072 | i &= ~IBX_ELD_ADDRESS; |
6073 | I915_WRITE(aud_cntl_st, i); |
6073 | I915_WRITE(aud_cntl_st, i); |
6074 | 6074 | ||
6075 | len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ |
6075 | len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ |
6076 | DRM_DEBUG_DRIVER("ELD size %d\n", len); |
6076 | DRM_DEBUG_DRIVER("ELD size %d\n", len); |
6077 | for (i = 0; i < len; i++) |
6077 | for (i = 0; i < len; i++) |
6078 | I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); |
6078 | I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); |
6079 | 6079 | ||
6080 | i = I915_READ(aud_cntrl_st2); |
6080 | i = I915_READ(aud_cntrl_st2); |
6081 | i |= eldv; |
6081 | i |= eldv; |
6082 | I915_WRITE(aud_cntrl_st2, i); |
6082 | I915_WRITE(aud_cntrl_st2, i); |
6083 | } |
6083 | } |
6084 | 6084 | ||
/**
 * intel_write_eld - propagate the ELD (EDID-Like Data) for the current mode
 * @encoder: encoder whose attached audio-capable connector should be updated
 * @mode: display mode being set (source of the A/V sync delay)
 *
 * Selects the audio connector for @encoder via drm_select_eld(), patches
 * ELD byte 6 (audio/video latency) from the mode, and hands the connector
 * off to the platform-specific write_eld hook when the platform provides
 * one.  Does nothing when no ELD-capable connector is attached.
 */
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	connector = drm_select_eld(encoder, mode);
	if (!connector)
		return;

	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
			 connector->base.id,
			 drm_get_connector_name(connector),
			 connector->encoder->base.id,
			 drm_get_encoder_name(connector->encoder));

	/* ELD byte 6 holds the A/V sync delay in units of 2 ms */
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	/* Hand off to the per-generation writer (e.g. IBX/CPT paths above). */
	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc);
}
6108 | 6108 | ||
6109 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ |
6109 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ |
6110 | void intel_crtc_load_lut(struct drm_crtc *crtc) |
6110 | void intel_crtc_load_lut(struct drm_crtc *crtc) |
6111 | { |
6111 | { |
6112 | struct drm_device *dev = crtc->dev; |
6112 | struct drm_device *dev = crtc->dev; |
6113 | struct drm_i915_private *dev_priv = dev->dev_private; |
6113 | struct drm_i915_private *dev_priv = dev->dev_private; |
6114 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6114 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6115 | int palreg = PALETTE(intel_crtc->pipe); |
6115 | int palreg = PALETTE(intel_crtc->pipe); |
6116 | int i; |
6116 | int i; |
6117 | 6117 | ||
6118 | /* The clocks have to be on to load the palette. */ |
6118 | /* The clocks have to be on to load the palette. */ |
6119 | if (!crtc->enabled || !intel_crtc->active) |
6119 | if (!crtc->enabled || !intel_crtc->active) |
6120 | return; |
6120 | return; |
6121 | 6121 | ||
6122 | /* use legacy palette for Ironlake */ |
6122 | /* use legacy palette for Ironlake */ |
6123 | if (HAS_PCH_SPLIT(dev)) |
6123 | if (HAS_PCH_SPLIT(dev)) |
6124 | palreg = LGC_PALETTE(intel_crtc->pipe); |
6124 | palreg = LGC_PALETTE(intel_crtc->pipe); |
6125 | 6125 | ||
6126 | for (i = 0; i < 256; i++) { |
6126 | for (i = 0; i < 256; i++) { |
6127 | I915_WRITE(palreg + 4 * i, |
6127 | I915_WRITE(palreg + 4 * i, |
6128 | (intel_crtc->lut_r[i] << 16) | |
6128 | (intel_crtc->lut_r[i] << 16) | |
6129 | (intel_crtc->lut_g[i] << 8) | |
6129 | (intel_crtc->lut_g[i] << 8) | |
6130 | intel_crtc->lut_b[i]); |
6130 | intel_crtc->lut_b[i]); |
6131 | } |
6131 | } |
6132 | } |
6132 | } |
6133 | 6133 | ||
6134 | #if 0 |
6134 | #if 0 |
/*
 * i845_update_cursor - show or hide the hardware cursor on 845G/865G
 * @crtc: CRTC owning the cursor
 * @base: graphics address of the cursor image; 0 means "hide"
 *
 * These chipsets only allow the cursor base address to change while the
 * cursor is disabled, so this helper only acts on visibility transitions
 * and writes the base on the way to "visible".
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	/* Nothing to do when visibility is unchanged. */
	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}
6164 | 6164 | ||
/*
 * i9xx_update_cursor - program the cursor plane on gen3+ (pre-Ivybridge)
 * @crtc: CRTC owning the cursor
 * @base: graphics address of the 64x64 ARGB cursor image; 0 hides it
 *
 * Rewrites the cursor control register only on visibility transitions,
 * then always writes CURBASE; per the trailing comment the new state is
 * committed by the hardware on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
}
6190 | 6190 | ||
/*
 * ivb_update_cursor - program the cursor plane on Ivybridge/Haswell
 * @crtc: CRTC owning the cursor
 * @base: graphics address of the 64x64 ARGB cursor image; 0 hides it
 *
 * Same scheme as i9xx_update_cursor() but using the IVB cursor registers,
 * which carry no pipe-select field.  On Haswell the cursor is additionally
 * routed through the pipe color-space converter (CURSOR_PIPE_CSC_ENABLE).
 */
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
		if (base) {
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		if (IS_HASWELL(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE_IVB(pipe), base);
}
6217 | 6217 | ||
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * intel_crtc_update_cursor - recompute cursor visibility/position and program hw
 * @crtc: target CRTC
 * @on: false forces the cursor off regardless of its position
 *
 * Decides whether the cursor should be visible (base address != 0 means
 * visible), encodes the signed x/y position into the sign-magnitude CURPOS
 * register layout, and dispatches to the generation-specific cursor helper.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	/* Hide when forced off, the crtc is disabled/has no fb, or the
	 * cursor lies past the right/bottom edge of the framebuffer. */
	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	if (x < 0) {
		/* Entirely off the left edge: hide.  Otherwise encode the
		 * negative coordinate as sign bit + magnitude. */
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		/* Same treatment for the top edge. */
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	/* Hidden before and after: no register writes needed. */
	if (!visible && !intel_crtc->cursor_visible)
		return;

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}
}
6276 | 6276 | ||
6277 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, |
6277 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, |
6278 | struct drm_file *file, |
6278 | struct drm_file *file, |
6279 | uint32_t handle, |
6279 | uint32_t handle, |
6280 | uint32_t width, uint32_t height) |
6280 | uint32_t width, uint32_t height) |
6281 | { |
6281 | { |
6282 | struct drm_device *dev = crtc->dev; |
6282 | struct drm_device *dev = crtc->dev; |
6283 | struct drm_i915_private *dev_priv = dev->dev_private; |
6283 | struct drm_i915_private *dev_priv = dev->dev_private; |
6284 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6284 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6285 | struct drm_i915_gem_object *obj; |
6285 | struct drm_i915_gem_object *obj; |
6286 | uint32_t addr; |
6286 | uint32_t addr; |
6287 | int ret; |
6287 | int ret; |
6288 | 6288 | ||
6289 | /* if we want to turn off the cursor ignore width and height */ |
6289 | /* if we want to turn off the cursor ignore width and height */ |
6290 | if (!handle) { |
6290 | if (!handle) { |
6291 | DRM_DEBUG_KMS("cursor off\n"); |
6291 | DRM_DEBUG_KMS("cursor off\n"); |
6292 | addr = 0; |
6292 | addr = 0; |
6293 | obj = NULL; |
6293 | obj = NULL; |
6294 | mutex_lock(&dev->struct_mutex); |
6294 | mutex_lock(&dev->struct_mutex); |
6295 | goto finish; |
6295 | goto finish; |
6296 | } |
6296 | } |
6297 | 6297 | ||
6298 | /* Currently we only support 64x64 cursors */ |
6298 | /* Currently we only support 64x64 cursors */ |
6299 | if (width != 64 || height != 64) { |
6299 | if (width != 64 || height != 64) { |
6300 | DRM_ERROR("we currently only support 64x64 cursors\n"); |
6300 | DRM_ERROR("we currently only support 64x64 cursors\n"); |
6301 | return -EINVAL; |
6301 | return -EINVAL; |
6302 | } |
6302 | } |
6303 | 6303 | ||
6304 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
6304 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
6305 | if (&obj->base == NULL) |
6305 | if (&obj->base == NULL) |
6306 | return -ENOENT; |
6306 | return -ENOENT; |
6307 | 6307 | ||
6308 | if (obj->base.size < width * height * 4) { |
6308 | if (obj->base.size < width * height * 4) { |
6309 | DRM_ERROR("buffer is to small\n"); |
6309 | DRM_ERROR("buffer is to small\n"); |
6310 | ret = -ENOMEM; |
6310 | ret = -ENOMEM; |
6311 | goto fail; |
6311 | goto fail; |
6312 | } |
6312 | } |
6313 | 6313 | ||
6314 | /* we only need to pin inside GTT if cursor is non-phy */ |
6314 | /* we only need to pin inside GTT if cursor is non-phy */ |
6315 | mutex_lock(&dev->struct_mutex); |
6315 | mutex_lock(&dev->struct_mutex); |
6316 | if (!dev_priv->info->cursor_needs_physical) { |
6316 | if (!dev_priv->info->cursor_needs_physical) { |
6317 | if (obj->tiling_mode) { |
6317 | if (obj->tiling_mode) { |
6318 | DRM_ERROR("cursor cannot be tiled\n"); |
6318 | DRM_ERROR("cursor cannot be tiled\n"); |
6319 | ret = -EINVAL; |
6319 | ret = -EINVAL; |
6320 | goto fail_locked; |
6320 | goto fail_locked; |
6321 | } |
6321 | } |
6322 | 6322 | ||
6323 | ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL); |
6323 | ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL); |
6324 | if (ret) { |
6324 | if (ret) { |
6325 | DRM_ERROR("failed to move cursor bo into the GTT\n"); |
6325 | DRM_ERROR("failed to move cursor bo into the GTT\n"); |
6326 | goto fail_locked; |
6326 | goto fail_locked; |
6327 | } |
6327 | } |
6328 | 6328 | ||
6329 | ret = i915_gem_object_put_fence(obj); |
6329 | ret = i915_gem_object_put_fence(obj); |
6330 | if (ret) { |
6330 | if (ret) { |
6331 | DRM_ERROR("failed to release fence for cursor"); |
6331 | DRM_ERROR("failed to release fence for cursor"); |
6332 | goto fail_unpin; |
6332 | goto fail_unpin; |
6333 | } |
6333 | } |
6334 | 6334 | ||
6335 | addr = obj->gtt_offset; |
6335 | addr = obj->gtt_offset; |
6336 | } else { |
6336 | } else { |
6337 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
6337 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
6338 | ret = i915_gem_attach_phys_object(dev, obj, |
6338 | ret = i915_gem_attach_phys_object(dev, obj, |
6339 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, |
6339 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, |
6340 | align); |
6340 | align); |
6341 | if (ret) { |
6341 | if (ret) { |
6342 | DRM_ERROR("failed to attach phys object\n"); |
6342 | DRM_ERROR("failed to attach phys object\n"); |
6343 | goto fail_locked; |
6343 | goto fail_locked; |
6344 | } |
6344 | } |
6345 | addr = obj->phys_obj->handle->busaddr; |
6345 | addr = obj->phys_obj->handle->busaddr; |
6346 | } |
6346 | } |
6347 | 6347 | ||
6348 | if (IS_GEN2(dev)) |
6348 | if (IS_GEN2(dev)) |
6349 | I915_WRITE(CURSIZE, (height << 12) | width); |
6349 | I915_WRITE(CURSIZE, (height << 12) | width); |
6350 | 6350 | ||
6351 | finish: |
6351 | finish: |
6352 | if (intel_crtc->cursor_bo) { |
6352 | if (intel_crtc->cursor_bo) { |
6353 | if (dev_priv->info->cursor_needs_physical) { |
6353 | if (dev_priv->info->cursor_needs_physical) { |
6354 | if (intel_crtc->cursor_bo != obj) |
6354 | if (intel_crtc->cursor_bo != obj) |
6355 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
6355 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
6356 | } else |
6356 | } else |
6357 | i915_gem_object_unpin(intel_crtc->cursor_bo); |
6357 | i915_gem_object_unpin(intel_crtc->cursor_bo); |
6358 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); |
6358 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); |
6359 | } |
6359 | } |
6360 | 6360 | ||
6361 | mutex_unlock(&dev->struct_mutex); |
6361 | mutex_unlock(&dev->struct_mutex); |
6362 | 6362 | ||
6363 | intel_crtc->cursor_addr = addr; |
6363 | intel_crtc->cursor_addr = addr; |
6364 | intel_crtc->cursor_bo = obj; |
6364 | intel_crtc->cursor_bo = obj; |
6365 | intel_crtc->cursor_width = width; |
6365 | intel_crtc->cursor_width = width; |
6366 | intel_crtc->cursor_height = height; |
6366 | intel_crtc->cursor_height = height; |
6367 | 6367 | ||
6368 | // intel_crtc_update_cursor(crtc, true); |
6368 | // intel_crtc_update_cursor(crtc, true); |
6369 | 6369 | ||
6370 | return 0; |
6370 | return 0; |
6371 | fail_unpin: |
6371 | fail_unpin: |
6372 | i915_gem_object_unpin(obj); |
6372 | i915_gem_object_unpin(obj); |
6373 | fail_locked: |
6373 | fail_locked: |
6374 | mutex_unlock(&dev->struct_mutex); |
6374 | mutex_unlock(&dev->struct_mutex); |
6375 | fail: |
6375 | fail: |
6376 | drm_gem_object_unreference_unlocked(&obj->base); |
6376 | drm_gem_object_unreference_unlocked(&obj->base); |
6377 | return ret; |
6377 | return ret; |
6378 | } |
6378 | } |
6379 | 6379 | ||
6380 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) |
6380 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) |
6381 | { |
6381 | { |
6382 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6382 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6383 | 6383 | ||
6384 | intel_crtc->cursor_x = x; |
6384 | intel_crtc->cursor_x = x; |
6385 | intel_crtc->cursor_y = y; |
6385 | intel_crtc->cursor_y = y; |
6386 | 6386 | ||
6387 | // intel_crtc_update_cursor(crtc, true); |
6387 | // intel_crtc_update_cursor(crtc, true); |
6388 | 6388 | ||
6389 | return 0; |
6389 | return 0; |
6390 | } |
6390 | } |
6391 | #endif |
6391 | #endif |
6392 | 6392 | ||
6393 | /** Sets the color ramps on behalf of RandR */ |
6393 | /** Sets the color ramps on behalf of RandR */ |
6394 | void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
6394 | void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
6395 | u16 blue, int regno) |
6395 | u16 blue, int regno) |
6396 | { |
6396 | { |
6397 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6397 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6398 | 6398 | ||
6399 | intel_crtc->lut_r[regno] = red >> 8; |
6399 | intel_crtc->lut_r[regno] = red >> 8; |
6400 | intel_crtc->lut_g[regno] = green >> 8; |
6400 | intel_crtc->lut_g[regno] = green >> 8; |
6401 | intel_crtc->lut_b[regno] = blue >> 8; |
6401 | intel_crtc->lut_b[regno] = blue >> 8; |
6402 | } |
6402 | } |
6403 | 6403 | ||
6404 | void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
6404 | void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
6405 | u16 *blue, int regno) |
6405 | u16 *blue, int regno) |
6406 | { |
6406 | { |
6407 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6407 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6408 | 6408 | ||
6409 | *red = intel_crtc->lut_r[regno] << 8; |
6409 | *red = intel_crtc->lut_r[regno] << 8; |
6410 | *green = intel_crtc->lut_g[regno] << 8; |
6410 | *green = intel_crtc->lut_g[regno] << 8; |
6411 | *blue = intel_crtc->lut_b[regno] << 8; |
6411 | *blue = intel_crtc->lut_b[regno] << 8; |
6412 | } |
6412 | } |
6413 | 6413 | ||
6414 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
6414 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
6415 | u16 *blue, uint32_t start, uint32_t size) |
6415 | u16 *blue, uint32_t start, uint32_t size) |
6416 | { |
6416 | { |
6417 | int end = (start + size > 256) ? 256 : start + size, i; |
6417 | int end = (start + size > 256) ? 256 : start + size, i; |
6418 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6418 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6419 | 6419 | ||
6420 | for (i = start; i < end; i++) { |
6420 | for (i = start; i < end; i++) { |
6421 | intel_crtc->lut_r[i] = red[i] >> 8; |
6421 | intel_crtc->lut_r[i] = red[i] >> 8; |
6422 | intel_crtc->lut_g[i] = green[i] >> 8; |
6422 | intel_crtc->lut_g[i] = green[i] >> 8; |
6423 | intel_crtc->lut_b[i] = blue[i] >> 8; |
6423 | intel_crtc->lut_b[i] = blue[i] >> 8; |
6424 | } |
6424 | } |
6425 | 6425 | ||
6426 | intel_crtc_load_lut(crtc); |
6426 | intel_crtc_load_lut(crtc); |
6427 | } |
6427 | } |
6428 | 6428 | ||
6429 | /** |
6429 | /** |
6430 | * Get a pipe with a simple mode set on it for doing load-based monitor |
6430 | * Get a pipe with a simple mode set on it for doing load-based monitor |
6431 | * detection. |
6431 | * detection. |
6432 | * |
6432 | * |
6433 | * It will be up to the load-detect code to adjust the pipe as appropriate for |
6433 | * It will be up to the load-detect code to adjust the pipe as appropriate for |
6434 | * its requirements. The pipe will be connected to no other encoders. |
6434 | * its requirements. The pipe will be connected to no other encoders. |
6435 | * |
6435 | * |
6436 | * Currently this code will only succeed if there is a pipe with no encoders |
6436 | * Currently this code will only succeed if there is a pipe with no encoders |
6437 | * configured for it. In the future, it could choose to temporarily disable |
6437 | * configured for it. In the future, it could choose to temporarily disable |
6438 | * some outputs to free up a pipe for its use. |
6438 | * some outputs to free up a pipe for its use. |
6439 | * |
6439 | * |
6440 | * \return crtc, or NULL if no pipes are available. |
6440 | * \return crtc, or NULL if no pipes are available. |
6441 | */ |
6441 | */ |
6442 | 6442 | ||
/* VESA 640x480x72Hz mode to set on the pipe */
/* Timings: 31.5 MHz dot clock; h: 640/664/704/832, v: 480/489/491/520,
 * negative h/v sync — a minimal safe mode for load-based detection. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6448 | 6448 | ||
6449 | static struct drm_framebuffer * |
6449 | static struct drm_framebuffer * |
6450 | intel_framebuffer_create(struct drm_device *dev, |
6450 | intel_framebuffer_create(struct drm_device *dev, |
6451 | struct drm_mode_fb_cmd2 *mode_cmd, |
6451 | struct drm_mode_fb_cmd2 *mode_cmd, |
6452 | struct drm_i915_gem_object *obj) |
6452 | struct drm_i915_gem_object *obj) |
6453 | { |
6453 | { |
6454 | struct intel_framebuffer *intel_fb; |
6454 | struct intel_framebuffer *intel_fb; |
6455 | int ret; |
6455 | int ret; |
6456 | 6456 | ||
6457 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); |
6457 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); |
6458 | if (!intel_fb) { |
6458 | if (!intel_fb) { |
6459 | drm_gem_object_unreference_unlocked(&obj->base); |
6459 | drm_gem_object_unreference_unlocked(&obj->base); |
6460 | return ERR_PTR(-ENOMEM); |
6460 | return ERR_PTR(-ENOMEM); |
6461 | } |
6461 | } |
6462 | 6462 | ||
6463 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); |
6463 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); |
6464 | if (ret) { |
6464 | if (ret) { |
6465 | drm_gem_object_unreference_unlocked(&obj->base); |
6465 | drm_gem_object_unreference_unlocked(&obj->base); |
6466 | kfree(intel_fb); |
6466 | kfree(intel_fb); |
6467 | return ERR_PTR(ret); |
6467 | return ERR_PTR(ret); |
6468 | } |
6468 | } |
6469 | 6469 | ||
6470 | return &intel_fb->base; |
6470 | return &intel_fb->base; |
6471 | } |
6471 | } |
6472 | 6472 | ||
6473 | static u32 |
6473 | static u32 |
6474 | intel_framebuffer_pitch_for_width(int width, int bpp) |
6474 | intel_framebuffer_pitch_for_width(int width, int bpp) |
6475 | { |
6475 | { |
6476 | u32 pitch = DIV_ROUND_UP(width * bpp, 8); |
6476 | u32 pitch = DIV_ROUND_UP(width * bpp, 8); |
6477 | return ALIGN(pitch, 64); |
6477 | return ALIGN(pitch, 64); |
6478 | } |
6478 | } |
6479 | 6479 | ||
6480 | static u32 |
6480 | static u32 |
6481 | intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) |
6481 | intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) |
6482 | { |
6482 | { |
6483 | u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); |
6483 | u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); |
6484 | return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); |
6484 | return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); |
6485 | } |
6485 | } |
6486 | 6486 | ||
/*
 * intel_framebuffer_create_for_mode - allocate a scratch fb sized for @mode
 *
 * Upstream this allocates a GEM object big enough for @mode at @bpp and
 * wraps it via intel_framebuffer_create().  In this port the allocation
 * path is disabled (commented out), so the function unconditionally
 * reports allocation failure with ERR_PTR(-ENOMEM).
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

//	obj = i915_gem_alloc_object(dev,
//				    intel_framebuffer_size_for_mode(mode, bpp));
//	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

//	mode_cmd.width = mode->hdisplay;
//	mode_cmd.height = mode->vdisplay;
//	mode_cmd.depth = depth;
//	mode_cmd.bpp = bpp;
//	mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);

//	return intel_framebuffer_create(dev, &mode_cmd, obj);
}
6508 | 6508 | ||
6509 | static struct drm_framebuffer * |
6509 | static struct drm_framebuffer * |
6510 | mode_fits_in_fbdev(struct drm_device *dev, |
6510 | mode_fits_in_fbdev(struct drm_device *dev, |
6511 | struct drm_display_mode *mode) |
6511 | struct drm_display_mode *mode) |
6512 | { |
6512 | { |
6513 | struct drm_i915_private *dev_priv = dev->dev_private; |
6513 | struct drm_i915_private *dev_priv = dev->dev_private; |
6514 | struct drm_i915_gem_object *obj; |
6514 | struct drm_i915_gem_object *obj; |
6515 | struct drm_framebuffer *fb; |
6515 | struct drm_framebuffer *fb; |
6516 | 6516 | ||
6517 | // if (dev_priv->fbdev == NULL) |
6517 | // if (dev_priv->fbdev == NULL) |
6518 | // return NULL; |
6518 | // return NULL; |
6519 | 6519 | ||
6520 | // obj = dev_priv->fbdev->ifb.obj; |
6520 | // obj = dev_priv->fbdev->ifb.obj; |
6521 | // if (obj == NULL) |
6521 | // if (obj == NULL) |
6522 | return NULL; |
6522 | return NULL; |
6523 | 6523 | ||
6524 | // if (obj->base.size < mode->vdisplay * fb->pitch) |
6524 | // if (obj->base.size < mode->vdisplay * fb->pitch) |
6525 | if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, |
6525 | if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, |
6526 | fb->bits_per_pixel)) |
6526 | fb->bits_per_pixel)) |
6527 | // return NULL; |
6527 | // return NULL; |
6528 | 6528 | ||
6529 | if (obj->base.size < mode->vdisplay * fb->pitches[0]) |
6529 | if (obj->base.size < mode->vdisplay * fb->pitches[0]) |
6530 | return NULL; |
6530 | return NULL; |
6531 | 6531 | ||
6532 | // return fb; |
6532 | // return fb; |
6533 | } |
6533 | } |
6534 | 6534 | ||
/*
 * Set up a pipe so that a load-detection cycle can be run on @connector.
 *
 * On success the chosen crtc's mutex is LEFT HELD; the caller must balance
 * with intel_release_load_detect_pipe(), which performs the unlock.  @old
 * records the state (dpms mode, whether a temporary pipe/fb was set up and
 * which fb to release) needed by that release call.
 *
 * Returns true when a pipe is driving the connector and one vblank has
 * passed, false (with the mutex released) on any failure.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	int i = -1;	/* crtc index; incremented before first use */

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		/* Held across the detection cycle; released by
		 * intel_release_load_detect_pipe(). */
		mutex_lock(&crtc->mutex);

		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* Skip crtcs this encoder cannot be routed to. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	mutex_lock(&crtc->mutex);
	/* Stage the new routing for the upcoming modeset. */
	intel_encoder->new_crtc = to_intel_crtc(crtc);
	to_intel_connector(connector)->new_encoder = intel_encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		/* 24bpp in a 32-bit pixel is enough for CRT load detection. */
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		mutex_unlock(&crtc->mutex);
		return false;
	}

	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		mutex_unlock(&crtc->mutex);
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;
}
6642 | 6642 | ||
/*
 * Undo whatever intel_get_load_detect_pipe() set up for @connector, using
 * the state it recorded in @old.  Counterpart of the acquire path: in
 * particular it releases the crtc mutex that intel_get_load_detect_pipe()
 * left held on success.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		/* A temporary pipe was lit up: unstage the routing and shut
		 * the pipe back down with a NULL mode. */
		to_intel_connector(connector)->new_encoder = NULL;
		intel_encoder->new_crtc = NULL;
		intel_set_mode(crtc, NULL, 0, 0, NULL);

		/* Drop the throw-away framebuffer, if one was created. */
		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		mutex_unlock(&crtc->mutex);
		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);

	mutex_unlock(&crtc->mutex);
}
6675 | 6675 | ||
/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* The DPLL rate select bit tells us which of the two FP (M/N
	 * divisor) registers is currently in use. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	/* Decode the M1/N/M2 divisors; Pineview packs them differently. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored as a one-hot bitfield; ffs() recovers the
		 * divisor value. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the output type the PLL was programmed for. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* Gen2: only pipe B can drive LVDS. */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				/* Field stores P1 - 2 on i830-class hw. */
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
6762 | 6762 | ||
6763 | /** Returns the currently programmed mode of the given pipe. */ |
6763 | /** Returns the currently programmed mode of the given pipe. */ |
6764 | struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, |
6764 | struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, |
6765 | struct drm_crtc *crtc) |
6765 | struct drm_crtc *crtc) |
6766 | { |
6766 | { |
6767 | struct drm_i915_private *dev_priv = dev->dev_private; |
6767 | struct drm_i915_private *dev_priv = dev->dev_private; |
6768 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6768 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6769 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
6769 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
6770 | struct drm_display_mode *mode; |
6770 | struct drm_display_mode *mode; |
6771 | int htot = I915_READ(HTOTAL(cpu_transcoder)); |
6771 | int htot = I915_READ(HTOTAL(cpu_transcoder)); |
6772 | int hsync = I915_READ(HSYNC(cpu_transcoder)); |
6772 | int hsync = I915_READ(HSYNC(cpu_transcoder)); |
6773 | int vtot = I915_READ(VTOTAL(cpu_transcoder)); |
6773 | int vtot = I915_READ(VTOTAL(cpu_transcoder)); |
6774 | int vsync = I915_READ(VSYNC(cpu_transcoder)); |
6774 | int vsync = I915_READ(VSYNC(cpu_transcoder)); |
6775 | 6775 | ||
6776 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
6776 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
6777 | if (!mode) |
6777 | if (!mode) |
6778 | return NULL; |
6778 | return NULL; |
6779 | 6779 | ||
6780 | mode->clock = intel_crtc_clock_get(dev, crtc); |
6780 | mode->clock = intel_crtc_clock_get(dev, crtc); |
6781 | mode->hdisplay = (htot & 0xffff) + 1; |
6781 | mode->hdisplay = (htot & 0xffff) + 1; |
6782 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; |
6782 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; |
6783 | mode->hsync_start = (hsync & 0xffff) + 1; |
6783 | mode->hsync_start = (hsync & 0xffff) + 1; |
6784 | mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; |
6784 | mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; |
6785 | mode->vdisplay = (vtot & 0xffff) + 1; |
6785 | mode->vdisplay = (vtot & 0xffff) + 1; |
6786 | mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; |
6786 | mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; |
6787 | mode->vsync_start = (vsync & 0xffff) + 1; |
6787 | mode->vsync_start = (vsync & 0xffff) + 1; |
6788 | mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; |
6788 | mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; |
6789 | 6789 | ||
6790 | drm_mode_set_name(mode); |
6790 | drm_mode_set_name(mode); |
6791 | 6791 | ||
6792 | return mode; |
6792 | return mode; |
6793 | } |
6793 | } |
6794 | 6794 | ||
6795 | static void intel_increase_pllclock(struct drm_crtc *crtc) |
6795 | static void intel_increase_pllclock(struct drm_crtc *crtc) |
6796 | { |
6796 | { |
6797 | struct drm_device *dev = crtc->dev; |
6797 | struct drm_device *dev = crtc->dev; |
6798 | drm_i915_private_t *dev_priv = dev->dev_private; |
6798 | drm_i915_private_t *dev_priv = dev->dev_private; |
6799 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6799 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6800 | int pipe = intel_crtc->pipe; |
6800 | int pipe = intel_crtc->pipe; |
6801 | int dpll_reg = DPLL(pipe); |
6801 | int dpll_reg = DPLL(pipe); |
6802 | int dpll; |
6802 | int dpll; |
6803 | 6803 | ||
6804 | if (HAS_PCH_SPLIT(dev)) |
6804 | if (HAS_PCH_SPLIT(dev)) |
6805 | return; |
6805 | return; |
6806 | 6806 | ||
6807 | if (!dev_priv->lvds_downclock_avail) |
6807 | if (!dev_priv->lvds_downclock_avail) |
6808 | return; |
6808 | return; |
6809 | 6809 | ||
6810 | dpll = I915_READ(dpll_reg); |
6810 | dpll = I915_READ(dpll_reg); |
6811 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
6811 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
6812 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
6812 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
6813 | 6813 | ||
6814 | assert_panel_unlocked(dev_priv, pipe); |
6814 | assert_panel_unlocked(dev_priv, pipe); |
6815 | 6815 | ||
6816 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
6816 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
6817 | I915_WRITE(dpll_reg, dpll); |
6817 | I915_WRITE(dpll_reg, dpll); |
6818 | intel_wait_for_vblank(dev, pipe); |
6818 | intel_wait_for_vblank(dev, pipe); |
6819 | 6819 | ||
6820 | dpll = I915_READ(dpll_reg); |
6820 | dpll = I915_READ(dpll_reg); |
6821 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
6821 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
6822 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
6822 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
6823 | } |
6823 | } |
6824 | } |
6824 | } |
6825 | 6825 | ||
6826 | static void intel_decrease_pllclock(struct drm_crtc *crtc) |
6826 | static void intel_decrease_pllclock(struct drm_crtc *crtc) |
6827 | { |
6827 | { |
6828 | struct drm_device *dev = crtc->dev; |
6828 | struct drm_device *dev = crtc->dev; |
6829 | drm_i915_private_t *dev_priv = dev->dev_private; |
6829 | drm_i915_private_t *dev_priv = dev->dev_private; |
6830 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6830 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6831 | 6831 | ||
6832 | if (HAS_PCH_SPLIT(dev)) |
6832 | if (HAS_PCH_SPLIT(dev)) |
6833 | return; |
6833 | return; |
6834 | 6834 | ||
6835 | if (!dev_priv->lvds_downclock_avail) |
6835 | if (!dev_priv->lvds_downclock_avail) |
6836 | return; |
6836 | return; |
6837 | 6837 | ||
6838 | /* |
6838 | /* |
6839 | * Since this is called by a timer, we should never get here in |
6839 | * Since this is called by a timer, we should never get here in |
6840 | * the manual case. |
6840 | * the manual case. |
6841 | */ |
6841 | */ |
6842 | if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { |
6842 | if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { |
6843 | int pipe = intel_crtc->pipe; |
6843 | int pipe = intel_crtc->pipe; |
6844 | int dpll_reg = DPLL(pipe); |
6844 | int dpll_reg = DPLL(pipe); |
6845 | int dpll; |
6845 | int dpll; |
6846 | 6846 | ||
6847 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); |
6847 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); |
6848 | 6848 | ||
6849 | assert_panel_unlocked(dev_priv, pipe); |
6849 | assert_panel_unlocked(dev_priv, pipe); |
6850 | 6850 | ||
6851 | dpll = I915_READ(dpll_reg); |
6851 | dpll = I915_READ(dpll_reg); |
6852 | dpll |= DISPLAY_RATE_SELECT_FPA1; |
6852 | dpll |= DISPLAY_RATE_SELECT_FPA1; |
6853 | I915_WRITE(dpll_reg, dpll); |
6853 | I915_WRITE(dpll_reg, dpll); |
6854 | intel_wait_for_vblank(dev, pipe); |
6854 | intel_wait_for_vblank(dev, pipe); |
6855 | dpll = I915_READ(dpll_reg); |
6855 | dpll = I915_READ(dpll_reg); |
6856 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) |
6856 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) |
6857 | DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); |
6857 | DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); |
6858 | } |
6858 | } |
6859 | 6859 | ||
6860 | } |
6860 | } |
6861 | 6861 | ||
/* GPU activity notification: refresh the driver's gfx power/perf
 * bookkeeping.  Thin wrapper around i915_update_gfx_val(). */
void intel_mark_busy(struct drm_device *dev)
{
	i915_update_gfx_val(dev->dev_private);
}
6866 | 6866 | ||
6867 | void intel_mark_idle(struct drm_device *dev) |
6867 | void intel_mark_idle(struct drm_device *dev) |
6868 | { |
6868 | { |
6869 | struct drm_crtc *crtc; |
6869 | struct drm_crtc *crtc; |
6870 | 6870 | ||
6871 | if (!i915_powersave) |
6871 | if (!i915_powersave) |
6872 | return; |
6872 | return; |
6873 | 6873 | ||
6874 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
6874 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
6875 | if (!crtc->fb) |
6875 | if (!crtc->fb) |
6876 | continue; |
6876 | continue; |
6877 | 6877 | ||
6878 | intel_decrease_pllclock(crtc); |
6878 | intel_decrease_pllclock(crtc); |
6879 | } |
6879 | } |
6880 | } |
6880 | } |
6881 | 6881 | ||
6882 | void intel_mark_fb_busy(struct drm_i915_gem_object *obj) |
6882 | void intel_mark_fb_busy(struct drm_i915_gem_object *obj) |
6883 | { |
6883 | { |
6884 | struct drm_device *dev = obj->base.dev; |
6884 | struct drm_device *dev = obj->base.dev; |
6885 | struct drm_crtc *crtc; |
6885 | struct drm_crtc *crtc; |
6886 | 6886 | ||
6887 | if (!i915_powersave) |
6887 | if (!i915_powersave) |
6888 | return; |
6888 | return; |
6889 | 6889 | ||
6890 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
6890 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
6891 | if (!crtc->fb) |
6891 | if (!crtc->fb) |
6892 | continue; |
6892 | continue; |
6893 | 6893 | ||
6894 | if (to_intel_framebuffer(crtc->fb)->obj == obj) |
6894 | if (to_intel_framebuffer(crtc->fb)->obj == obj) |
6895 | intel_increase_pllclock(crtc); |
6895 | intel_increase_pllclock(crtc); |
6896 | } |
6896 | } |
6897 | } |
6897 | } |
6898 | 6898 | ||
6899 | static void intel_crtc_destroy(struct drm_crtc *crtc) |
6899 | static void intel_crtc_destroy(struct drm_crtc *crtc) |
6900 | { |
6900 | { |
6901 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6901 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6902 | struct drm_device *dev = crtc->dev; |
6902 | struct drm_device *dev = crtc->dev; |
6903 | struct intel_unpin_work *work; |
6903 | struct intel_unpin_work *work; |
6904 | unsigned long flags; |
6904 | unsigned long flags; |
6905 | 6905 | ||
6906 | spin_lock_irqsave(&dev->event_lock, flags); |
6906 | spin_lock_irqsave(&dev->event_lock, flags); |
6907 | work = intel_crtc->unpin_work; |
6907 | work = intel_crtc->unpin_work; |
6908 | intel_crtc->unpin_work = NULL; |
6908 | intel_crtc->unpin_work = NULL; |
6909 | spin_unlock_irqrestore(&dev->event_lock, flags); |
6909 | spin_unlock_irqrestore(&dev->event_lock, flags); |
6910 | 6910 | ||
6911 | if (work) { |
6911 | if (work) { |
6912 | // cancel_work_sync(&work->work); |
6912 | // cancel_work_sync(&work->work); |
6913 | kfree(work); |
6913 | kfree(work); |
6914 | } |
6914 | } |
6915 | 6915 | ||
6916 | drm_crtc_cleanup(crtc); |
6916 | drm_crtc_cleanup(crtc); |
6917 | 6917 | ||
6918 | kfree(intel_crtc); |
6918 | kfree(intel_crtc); |
6919 | } |
6919 | } |
6920 | 6920 | ||
6921 | #if 0 |
6921 | #if 0 |
/*
 * Deferred completion of a page flip (runs from the driver workqueue;
 * currently compiled out under #if 0 in this port): unpin and drop the
 * references on the old and new fb objects, then free the work item.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev = work->crtc->dev;

	/* struct_mutex protects the unpin and the object refcount drops. */
	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	/* Balance the count taken when the flip was queued. */
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

	kfree(work);
}
6941 | 6941 | ||
/*
 * Common flip-completion handler (irq context; currently compiled out under
 * #if 0 in this port): if the crtc has a pending flip that has reached
 * INTEL_FLIP_COMPLETE, deliver the vblank event and queue the deferred
 * unpin work.  The smp_rmb() placement pairs with the writer's barriers
 * around work->pending and must not be reordered.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* and that the unpin work is consistent wrt ->pending. */
	smp_rmb();

	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);

	drm_vblank_put(dev, intel_crtc->pipe);

	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* NOTE(review): obj is assigned but not used below — kept as-is. */
	obj = work->old_fb_obj;

	wake_up_all(&dev_priv->pending_flip_queue);

	/* Heavy cleanup (unpin + unref) is deferred to intel_unpin_work_fn. */
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
6986 | 6986 | ||
6987 | void intel_finish_page_flip(struct drm_device *dev, int pipe) |
6987 | void intel_finish_page_flip(struct drm_device *dev, int pipe) |
6988 | { |
6988 | { |
6989 | drm_i915_private_t *dev_priv = dev->dev_private; |
6989 | drm_i915_private_t *dev_priv = dev->dev_private; |
6990 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
6990 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
6991 | 6991 | ||
6992 | do_intel_finish_page_flip(dev, crtc); |
6992 | do_intel_finish_page_flip(dev, crtc); |
6993 | } |
6993 | } |
6994 | 6994 | ||
6995 | void intel_finish_page_flip_plane(struct drm_device *dev, int plane) |
6995 | void intel_finish_page_flip_plane(struct drm_device *dev, int plane) |
6996 | { |
6996 | { |
6997 | drm_i915_private_t *dev_priv = dev->dev_private; |
6997 | drm_i915_private_t *dev_priv = dev->dev_private; |
6998 | struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; |
6998 | struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; |
6999 | 6999 | ||
7000 | do_intel_finish_page_flip(dev, crtc); |
7000 | do_intel_finish_page_flip(dev, crtc); |
7001 | } |
7001 | } |
7002 | 7002 | ||
/*
 * Mark the pending flip on @plane's crtc as having started (irq context;
 * currently compiled out under #if 0 in this port).
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	/* NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work)
		/* inc_not_zero: never resurrect a work item whose pending
		 * count already dropped to zero. */
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
7019 | 7019 | ||
7020 | inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) |
7020 | inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) |
7021 | { |
7021 | { |
7022 | /* Ensure that the work item is consistent when activating it ... */ |
7022 | /* Ensure that the work item is consistent when activating it ... */ |
7023 | smp_wmb(); |
7023 | smp_wmb(); |
7024 | atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); |
7024 | atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); |
7025 | /* and that it is marked active as soon as the irq could fire. */ |
7025 | /* and that it is marked active as soon as the irq could fire. */ |
7026 | smp_wmb(); |
7026 | smp_wmb(); |
7027 | } |
7027 | } |
7028 | 7028 | ||
/*
 * Queue a page flip on gen2 hardware by emitting MI_DISPLAY_FLIP on the
 * render ring.
 *
 * Pins and fences @obj for scanout, emits a WAIT_FOR_EVENT on the
 * plane's flip-pending event followed by the flip command, and marks
 * the flip active before advancing the ring.
 *
 * Returns 0 on success or a negative errno; on failure @obj is left
 * unpinned.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	/* Reserve space for the 6 dwords emitted below. */
	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	/* Arm the work item before the flip commands can execute. */
	intel_mark_page_flip_active(intel_crtc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
7072 | 7072 | ||
/*
 * Queue a page flip on gen3 hardware.  Same structure as the gen2 path,
 * but uses the MI_DISPLAY_FLIP_I915 opcode and pads the batch with a
 * trailing MI_NOOP instead of the (unused) aux base address dword.
 *
 * Returns 0 on success or a negative errno; on failure @obj is left
 * unpinned.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		goto err_unpin;

	/* Wait for any previous flip on this plane before queueing ours. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, MI_NOOP);

	/* Arm the work item before the flip commands can execute. */
	intel_mark_page_flip_active(intel_crtc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
7113 | 7113 | ||
/*
 * Queue a page flip on i965-class (gen4-style) hardware via the render
 * ring.  Only 4 dwords are needed: the display registers retain their
 * tiling/linear offsets across a flip, so only the base address (with
 * the tiling mode in its low bits) plus pipe source size are emitted.
 *
 * Returns 0 on success or a negative errno; on failure @obj is left
 * unpinned.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring,
			(obj->gtt_offset + intel_crtc->dspaddr_offset) |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	/* Arm the work item before the flip commands can execute. */
	intel_mark_page_flip_active(intel_crtc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
7161 | 7161 | ||
/*
 * Queue a page flip on gen6 hardware via the render ring.  Like gen4,
 * but the tiling mode is packed into the pitch dword rather than the
 * base-address dword.
 *
 * Returns 0 on success or a negative errno; on failure @obj is left
 * unpinned.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	/* Arm the work item before the flip commands can execute. */
	intel_mark_page_flip_active(intel_crtc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
7205 | 7205 | ||
/*
 * On gen7 we currently use the blit ring because (in early silicon at least)
 * the render ring doesn't give us interrupts for page flip completion, which
 * means clients will hang after the first flip is queued. Fortunately the
 * blit ring generates interrupts properly, so use it instead.
 */
/*
 * Queue a page flip on gen7 hardware, using the blit ring (BCS) rather
 * than the render ring (see the comment above).  Gen7 encodes the
 * target plane (A/B/C) in dedicated IVB plane bits of the flip command.
 *
 * Returns 0 on success, -ENODEV for an unknown plane, or another
 * negative errno; on failure @obj is left unpinned.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
	uint32_t plane_bit = 0;
	int ret;

	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
	if (ret)
		goto err;

	switch(intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		/* Bail before touching the ring; err_unpin is safe here
		 * because nothing has been emitted yet. */
		WARN_ONCE(1, "unknown plane in flip command\n");
		ret = -ENODEV;
		goto err_unpin;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		goto err_unpin;

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
	intel_ring_emit(ring, (MI_NOOP));

	/* Arm the work item before the flip commands can execute. */
	intel_mark_page_flip_active(intel_crtc);
	intel_ring_advance(ring);
	return 0;

err_unpin:
	intel_unpin_fb_obj(obj);
err:
	return ret;
}
7261 | 7261 | ||
/*
 * Fallback queue_flip hook: always reports that page flipping is
 * unsupported on this platform.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj)
{
	return -ENODEV;
}
7269 | 7269 | ||
/*
 * drm_crtc_funcs.page_flip implementation: queue an asynchronous flip
 * of @crtc to the new framebuffer @fb; @event (may be NULL) is handed
 * to the unpin work for delivery on completion.
 *
 * Returns 0 on success; -EINVAL for parameter combinations that MI
 * display flips cannot express; -EBUSY if a flip is already pending on
 * this crtc; -ENOMEM, or any error from drm_vblank_get(),
 * i915_mutex_lock_interruptible() or the per-generation queue_flip
 * hook.  On error all references, the vblank get and the unpin_work
 * slot are rolled back.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->fb;
	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;
	int ret;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->fb->offsets[0] ||
	     fb->pitches[0] != crtc->fb->pitches[0]))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		/* Only one flip may be outstanding per crtc at a time. */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		kfree(work);
		drm_vblank_put(dev, intel_crtc->pipe);

		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* With two or more unpin works outstanding, wait for them to
	 * drain before queueing more. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->fb = fb;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	atomic_inc(&intel_crtc->unpin_work_count);
	/* Snapshot the reset counter so a GPU reset during the flip can
	 * be detected later. */
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	/* Hand off to the per-generation flip implementation. */
	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
	if (ret)
		goto cleanup_pending;

	intel_disable_fbc(dev);
	intel_mark_fb_busy(obj);
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_pending:
	/* Undo everything done since taking struct_mutex. */
	atomic_dec(&intel_crtc->unpin_work_count);
	crtc->fb = old_fb;
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

cleanup:
	spin_lock_irqsave(&dev->event_lock, flags);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_vblank_put(dev, intel_crtc->pipe);
free_work:
	kfree(work);

	return ret;
}
7372 | 7372 | ||
7373 | #endif |
7373 | #endif |
7374 | 7374 | ||
/* CRTC helper vtable shared by all intel crtcs. */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
};
7379 | 7379 | ||
7380 | bool intel_encoder_check_is_cloned(struct intel_encoder *encoder) |
7380 | bool intel_encoder_check_is_cloned(struct intel_encoder *encoder) |
7381 | { |
7381 | { |
7382 | struct intel_encoder *other_encoder; |
7382 | struct intel_encoder *other_encoder; |
7383 | struct drm_crtc *crtc = &encoder->new_crtc->base; |
7383 | struct drm_crtc *crtc = &encoder->new_crtc->base; |
7384 | 7384 | ||
7385 | if (WARN_ON(!crtc)) |
7385 | if (WARN_ON(!crtc)) |
7386 | return false; |
7386 | return false; |
7387 | 7387 | ||
7388 | list_for_each_entry(other_encoder, |
7388 | list_for_each_entry(other_encoder, |
7389 | &crtc->dev->mode_config.encoder_list, |
7389 | &crtc->dev->mode_config.encoder_list, |
7390 | base.head) { |
7390 | base.head) { |
7391 | 7391 | ||
7392 | if (&other_encoder->new_crtc->base != crtc || |
7392 | if (&other_encoder->new_crtc->base != crtc || |
7393 | encoder == other_encoder) |
7393 | encoder == other_encoder) |
7394 | continue; |
7394 | continue; |
7395 | else |
7395 | else |
7396 | return true; |
7396 | return true; |
7397 | } |
7397 | } |
7398 | 7398 | ||
7399 | return false; |
7399 | return false; |
7400 | } |
7400 | } |
7401 | 7401 | ||
7402 | static bool intel_encoder_crtc_ok(struct drm_encoder *encoder, |
7402 | static bool intel_encoder_crtc_ok(struct drm_encoder *encoder, |
7403 | struct drm_crtc *crtc) |
7403 | struct drm_crtc *crtc) |
7404 | { |
7404 | { |
7405 | struct drm_device *dev; |
7405 | struct drm_device *dev; |
7406 | struct drm_crtc *tmp; |
7406 | struct drm_crtc *tmp; |
7407 | int crtc_mask = 1; |
7407 | int crtc_mask = 1; |
7408 | 7408 | ||
7409 | WARN(!crtc, "checking null crtc?\n"); |
7409 | WARN(!crtc, "checking null crtc?\n"); |
7410 | 7410 | ||
7411 | dev = crtc->dev; |
7411 | dev = crtc->dev; |
7412 | 7412 | ||
7413 | list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { |
7413 | list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { |
7414 | if (tmp == crtc) |
7414 | if (tmp == crtc) |
7415 | break; |
7415 | break; |
7416 | crtc_mask <<= 1; |
7416 | crtc_mask <<= 1; |
7417 | } |
7417 | } |
7418 | 7418 | ||
7419 | if (encoder->possible_crtcs & crtc_mask) |
7419 | if (encoder->possible_crtcs & crtc_mask) |
7420 | return true; |
7420 | return true; |
7421 | return false; |
7421 | return false; |
7422 | } |
7422 | } |
7423 | 7423 | ||
7424 | /** |
7424 | /** |
7425 | * intel_modeset_update_staged_output_state |
7425 | * intel_modeset_update_staged_output_state |
7426 | * |
7426 | * |
7427 | * Updates the staged output configuration state, e.g. after we've read out the |
7427 | * Updates the staged output configuration state, e.g. after we've read out the |
7428 | * current hw state. |
7428 | * current hw state. |
7429 | */ |
7429 | */ |
7430 | static void intel_modeset_update_staged_output_state(struct drm_device *dev) |
7430 | static void intel_modeset_update_staged_output_state(struct drm_device *dev) |
7431 | { |
7431 | { |
7432 | struct intel_encoder *encoder; |
7432 | struct intel_encoder *encoder; |
7433 | struct intel_connector *connector; |
7433 | struct intel_connector *connector; |
7434 | 7434 | ||
7435 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
7435 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
7436 | base.head) { |
7436 | base.head) { |
7437 | connector->new_encoder = |
7437 | connector->new_encoder = |
7438 | to_intel_encoder(connector->base.encoder); |
7438 | to_intel_encoder(connector->base.encoder); |
7439 | } |
7439 | } |
7440 | 7440 | ||
7441 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
7441 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
7442 | base.head) { |
7442 | base.head) { |
7443 | encoder->new_crtc = |
7443 | encoder->new_crtc = |
7444 | to_intel_crtc(encoder->base.crtc); |
7444 | to_intel_crtc(encoder->base.crtc); |
7445 | } |
7445 | } |
7446 | } |
7446 | } |
7447 | 7447 | ||
7448 | /** |
7448 | /** |
7449 | * intel_modeset_commit_output_state |
7449 | * intel_modeset_commit_output_state |
7450 | * |
7450 | * |
7451 | * This function copies the stage display pipe configuration to the real one. |
7451 | * This function copies the stage display pipe configuration to the real one. |
7452 | */ |
7452 | */ |
7453 | static void intel_modeset_commit_output_state(struct drm_device *dev) |
7453 | static void intel_modeset_commit_output_state(struct drm_device *dev) |
7454 | { |
7454 | { |
7455 | struct intel_encoder *encoder; |
7455 | struct intel_encoder *encoder; |
7456 | struct intel_connector *connector; |
7456 | struct intel_connector *connector; |
7457 | 7457 | ||
7458 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
7458 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
7459 | base.head) { |
7459 | base.head) { |
7460 | connector->base.encoder = &connector->new_encoder->base; |
7460 | connector->base.encoder = &connector->new_encoder->base; |
7461 | } |
7461 | } |
7462 | 7462 | ||
7463 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
7463 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
7464 | base.head) { |
7464 | base.head) { |
7465 | encoder->base.crtc = &encoder->new_crtc->base; |
7465 | encoder->base.crtc = &encoder->new_crtc->base; |
7466 | } |
7466 | } |
7467 | } |
7467 | } |
7468 | 7468 | ||
7469 | static struct drm_display_mode * |
7469 | static struct drm_display_mode * |
7470 | intel_modeset_adjusted_mode(struct drm_crtc *crtc, |
7470 | intel_modeset_adjusted_mode(struct drm_crtc *crtc, |
7471 | struct drm_display_mode *mode) |
7471 | struct drm_display_mode *mode) |
7472 | { |
7472 | { |
7473 | struct drm_device *dev = crtc->dev; |
7473 | struct drm_device *dev = crtc->dev; |
7474 | struct drm_display_mode *adjusted_mode; |
7474 | struct drm_display_mode *adjusted_mode; |
7475 | struct drm_encoder_helper_funcs *encoder_funcs; |
7475 | struct drm_encoder_helper_funcs *encoder_funcs; |
7476 | struct intel_encoder *encoder; |
7476 | struct intel_encoder *encoder; |
7477 | 7477 | ||
7478 | adjusted_mode = drm_mode_duplicate(dev, mode); |
7478 | adjusted_mode = drm_mode_duplicate(dev, mode); |
7479 | if (!adjusted_mode) |
7479 | if (!adjusted_mode) |
7480 | return ERR_PTR(-ENOMEM); |
7480 | return ERR_PTR(-ENOMEM); |
7481 | 7481 | ||
7482 | /* Pass our mode to the connectors and the CRTC to give them a chance to |
7482 | /* Pass our mode to the connectors and the CRTC to give them a chance to |
7483 | * adjust it according to limitations or connector properties, and also |
7483 | * adjust it according to limitations or connector properties, and also |
7484 | * a chance to reject the mode entirely. |
7484 | * a chance to reject the mode entirely. |
7485 | */ |
7485 | */ |
7486 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
7486 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
7487 | base.head) { |
7487 | base.head) { |
7488 | 7488 | ||
7489 | if (&encoder->new_crtc->base != crtc) |
7489 | if (&encoder->new_crtc->base != crtc) |
7490 | continue; |
7490 | continue; |
7491 | encoder_funcs = encoder->base.helper_private; |
7491 | encoder_funcs = encoder->base.helper_private; |
7492 | if (!(encoder_funcs->mode_fixup(&encoder->base, mode, |
7492 | if (!(encoder_funcs->mode_fixup(&encoder->base, mode, |
7493 | adjusted_mode))) { |
7493 | adjusted_mode))) { |
7494 | DRM_DEBUG_KMS("Encoder fixup failed\n"); |
7494 | DRM_DEBUG_KMS("Encoder fixup failed\n"); |
7495 | goto fail; |
7495 | goto fail; |
7496 | } |
7496 | } |
7497 | } |
7497 | } |
7498 | 7498 | ||
7499 | if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) { |
7499 | if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) { |
7500 | DRM_DEBUG_KMS("CRTC fixup failed\n"); |
7500 | DRM_DEBUG_KMS("CRTC fixup failed\n"); |
7501 | goto fail; |
7501 | goto fail; |
7502 | } |
7502 | } |
7503 | DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); |
7503 | DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); |
7504 | 7504 | ||
7505 | return adjusted_mode; |
7505 | return adjusted_mode; |
7506 | fail: |
7506 | fail: |
7507 | drm_mode_destroy(dev, adjusted_mode); |
7507 | drm_mode_destroy(dev, adjusted_mode); |
7508 | return ERR_PTR(-EINVAL); |
7508 | return ERR_PTR(-EINVAL); |
7509 | } |
7509 | } |
7510 | 7510 | ||
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
 * simplicity we use the crtc's pipe number (because it's easier to obtain).
 *
 * On return:
 *   *disable_pipes - pipes that are currently enabled but have no staged
 *                    encoder, i.e. will be shut off completely.
 *   *prepare_pipes - pipes whose output configuration changes (old and new
 *                    crtc of every moved connector/encoder, plus the passed-in
 *                    crtc if it is enabled), minus the disabled ones.
 *   *modeset_pipes - pipes that need a full mode set; currently a copy of
 *                    prepare_pipes (see the long comment below), minus the
 *                    disabled ones.
 */
static void
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
			     unsigned *prepare_pipes, unsigned *disable_pipes)
{
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_crtc *tmp_crtc;

	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;

	/* Check which crtcs have changed outputs connected to them, these need
	 * to be part of the prepare_pipes mask. We don't (yet) support global
	 * modeset across multiple crtcs, so modeset_pipes will only have one
	 * bit set at most. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* Connector keeps its encoder: nothing to prepare for it. */
		if (connector->base.encoder == &connector->new_encoder->base)
			continue;

		/* Old crtc (through the current encoder) loses this output. */
		if (connector->base.encoder) {
			tmp_crtc = connector->base.encoder->crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		/* New crtc (through the staged encoder) gains it. */
		if (connector->new_encoder)
			*prepare_pipes |=
				1 << connector->new_encoder->new_crtc->pipe;
	}

	/* Same logic for encoders that move between crtcs. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (encoder->base.crtc == &encoder->new_crtc->base)
			continue;

		if (encoder->base.crtc) {
			tmp_crtc = encoder->base.crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		if (encoder->new_crtc)
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
	}

	/* Check for any pipes that will be fully disabled ... */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
			    base.head) {
		bool used = false;

		/* Don't try to disable disabled crtcs. */
		if (!intel_crtc->base.enabled)
			continue;

		/* Enabled crtc with no staged encoder -> disable it. */
		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			if (encoder->new_crtc == intel_crtc)
				used = true;
		}

		if (!used)
			*disable_pipes |= 1 << intel_crtc->pipe;
	}


	/* set_mode is also used to update properties on live display pipes. */
	intel_crtc = to_intel_crtc(crtc);
	if (crtc->enabled)
		*prepare_pipes |= 1 << intel_crtc->pipe;

	/* We only support modeset on one single crtc, hence we need to do that
	 * only for the passed in crtc iff we change anything else than just
	 * disable crtcs.
	 *
	 * This is actually not true, to be fully compatible with the old crtc
	 * helper we automatically disable _any_ output (i.e. doesn't need to be
	 * connected to the crtc we're modesetting on) if it's disconnected.
	 * Which is a rather nutty api (since changed the output configuration
	 * without userspace's explicit request can lead to confusion), but
	 * alas. Hence we currently need to modeset on all pipes we prepare. */
	if (*prepare_pipes)
		*modeset_pipes = *prepare_pipes;

	/* ... and mask these out. */
	*modeset_pipes &= ~(*disable_pipes);
	*prepare_pipes &= ~(*disable_pipes);
}
7602 | 7602 | ||
7603 | static bool intel_crtc_in_use(struct drm_crtc *crtc) |
7603 | static bool intel_crtc_in_use(struct drm_crtc *crtc) |
7604 | { |
7604 | { |
7605 | struct drm_encoder *encoder; |
7605 | struct drm_encoder *encoder; |
7606 | struct drm_device *dev = crtc->dev; |
7606 | struct drm_device *dev = crtc->dev; |
7607 | 7607 | ||
7608 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) |
7608 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) |
7609 | if (encoder->crtc == crtc) |
7609 | if (encoder->crtc == crtc) |
7610 | return true; |
7610 | return true; |
7611 | 7611 | ||
7612 | return false; |
7612 | return false; |
7613 | } |
7613 | } |
7614 | 7614 | ||
/* Commit the staged output configuration and bring the software bookkeeping
 * back into a consistent state for all pipes in @prepare_pipes.
 *
 * Ordering matters here: connectors_active is cleared on the affected
 * encoders first, then the staged routing is committed, and only then is
 * the derived state (crtc enabled flags, connector dpms, connectors_active)
 * recomputed from the new routing. */
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc;
	struct drm_connector *connector;

	/* Clear the active-connectors flag on every encoder that sits on a
	 * pipe we are about to touch; it is re-established below. */
	list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
			    base.head) {
		if (!intel_encoder->base.crtc)
			continue;

		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe))
			intel_encoder->connectors_active = false;
	}

	intel_modeset_commit_output_state(dev);

	/* Update computed state. */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
			    base.head) {
		intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
	}

	/* Every connector that ended up on a prepared pipe is now on: set its
	 * dpms property to ON and mark its encoder active. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		intel_crtc = to_intel_crtc(connector->encoder->crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe)) {
			struct drm_property *dpms_property =
				dev->mode_config.dpms_property;

			connector->dpms = DRM_MODE_DPMS_ON;
			drm_object_property_set_value(&connector->base,
							 dpms_property,
							 DRM_MODE_DPMS_ON);

			intel_encoder = to_intel_encoder(connector->encoder);
			intel_encoder->connectors_active = true;
		}
	}

}
7662 | 7662 | ||
/* Iterate over all intel crtcs whose pipe bit is set in @mask.
 *
 * NOTE(review): the macro expands to a bare `if` with no else, so using
 * `else` directly after the loop body would bind to this hidden `if`
 * (classic dangling-else hazard) — callers should avoid that. */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if (mask & (1 <<(intel_crtc)->pipe))

/* Sanity-check the modeset software state against itself and against the
 * hardware state (via the ->get_hw_state callbacks). Emits a WARN for every
 * inconsistency found; purely diagnostic, changes no state. */
void
intel_modeset_check_state(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	/* Connectors: staged routing must match committed routing. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(connector);

		WARN(&connector->new_encoder->base != connector->base.encoder,
		     "connector's staged encoder doesn't match current encoder\n");
	}

	/* Encoders: enabled/active flags must agree with the connectors
	 * attached to them and with the hardware. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		bool enabled = false;
		bool active = false;
		enum pipe pipe, tracked_pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base));

		WARN(&encoder->new_crtc->base != encoder->base.crtc,
		     "encoder's stage crtc doesn't match current crtc\n");
		WARN(encoder->connectors_active && !encoder->base.crtc,
		     "encoder's active_connectors set, but no crtc\n");

		/* Recompute enabled/active from the attached connectors. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->base.encoder != &encoder->base)
				continue;
			enabled = true;
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
				active = true;
		}
		WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);
		WARN(active && !encoder->base.crtc,
		     "active encoder with no crtc\n");

		WARN(encoder->connectors_active != active,
		     "encoder's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, encoder->connectors_active);

		/* Cross-check software tracking against the hardware. */
		active = encoder->get_hw_state(encoder, &pipe);
		WARN(active != encoder->connectors_active,
		     "encoder's hw state doesn't match sw tracking "
		     "(expected %i, found %i)\n",
		     encoder->connectors_active, active);

		if (!encoder->base.crtc)
			continue;

		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
		WARN(active && pipe != tracked_pipe,
		     "active encoder's pipe doesn't match"
		     "(expected %i, found %i)\n",
		     tracked_pipe, pipe);

	}

	/* Crtcs: enabled/active must agree with the encoders routed to them,
	 * and the pipe hardware must match crtc->active. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		bool enabled = false;
		bool active = false;

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.base.id);

		WARN(crtc->active && !crtc->base.enabled,
		     "active crtc, but not enabled in sw tracking\n");

		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
				    base.head) {
			if (encoder->base.crtc != &crtc->base)
				continue;
			enabled = true;
			if (encoder->connectors_active)
				active = true;
		}
		WARN(active != crtc->active,
		     "crtc's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, crtc->active);
		WARN(enabled != crtc->base.enabled,
		     "crtc's computed enabled state doesn't match tracked enabled state "
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);

		assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
	}
}
7766 | 7766 | ||
/* Perform a full modeset on @crtc with @mode, scanning out @fb at (@x, @y).
 *
 * Sequence: compute affected pipe masks, disable pipes that lose all
 * outputs, disable pipes being reconfigured, commit the staged output
 * state, set the mode on modeset_pipes, then re-enable prepare_pipes.
 * On failure the saved crtc->mode/hwmode are restored; on success the
 * resulting state is sanity-checked.
 *
 * Returns 0 on success or a negative errno. */
int intel_set_mode(struct drm_crtc *crtc,
		   struct drm_display_mode *mode,
		   int x, int y, struct drm_framebuffer *fb)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
	struct intel_crtc *intel_crtc;
	unsigned disable_pipes, prepare_pipes, modeset_pipes;
	int ret = 0;

	/* One allocation holds both saved modes; saved_hwmode is the second
	 * element. */
	saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
	if (!saved_mode)
		return -ENOMEM;
	saved_hwmode = saved_mode + 1;

	intel_modeset_affected_pipes(crtc, &modeset_pipes,
				     &prepare_pipes, &disable_pipes);

	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
		      modeset_pipes, prepare_pipes, disable_pipes);

	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
		intel_crtc_disable(&intel_crtc->base);

	*saved_hwmode = crtc->hwmode;
	*saved_mode = crtc->mode;

	/* Hack: Because we don't (yet) support global modeset on multiple
	 * crtcs, we don't keep track of the new mode for more than one crtc.
	 * Hence simply check whether any bit is set in modeset_pipes in all the
	 * pieces of code that are not yet converted to deal with mutliple crtcs
	 * changing their mode at the same time. */
	adjusted_mode = NULL;
	if (modeset_pipes) {
		adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
		if (IS_ERR(adjusted_mode)) {
			ret = PTR_ERR(adjusted_mode);
			goto out;
		}
	}

	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		if (intel_crtc->base.enabled)
			dev_priv->display.crtc_disable(&intel_crtc->base);
	}

	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
	 * to set it here already despite that we pass it down the callchain.
	 */
	if (modeset_pipes)
		crtc->mode = *mode;

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_state(dev, prepare_pipes);

	if (dev_priv->display.modeset_global_resources)
		dev_priv->display.modeset_global_resources(dev);

	/* Set up the DPLL and any encoders state that needs to adjust or depend
	 * on the DPLL.
	 */
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
		ret = intel_crtc_mode_set(&intel_crtc->base,
					  mode, adjusted_mode,
					  x, y, fb);
		if (ret)
			goto done;
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
		dev_priv->display.crtc_enable(&intel_crtc->base);

	if (modeset_pipes) {
		/* Store real post-adjustment hardware mode. */
		crtc->hwmode = *adjusted_mode;

		/* Calculate and store various constants which
		 * are later needed by vblank and swap-completion
		 * timestamping. They are derived from true hwmode.
		 */
		drm_calc_timestamping_constants(crtc);
	}

	/* FIXME: add subpixel order */
done:
	/* NOTE(review): adjusted_mode is NULL when modeset_pipes == 0;
	 * this relies on drm_mode_destroy coping with NULL — confirm
	 * against the drm core version in use. */
	drm_mode_destroy(dev, adjusted_mode);
	if (ret && crtc->enabled) {
		/* Modeset failed: roll the saved modes back. */
		crtc->hwmode = *saved_hwmode;
		crtc->mode = *saved_mode;
	} else {
		intel_modeset_check_state(dev);
	}

out:
	kfree(saved_mode);
	return ret;
}
7867 | 7867 | ||
7868 | void intel_crtc_restore_mode(struct drm_crtc *crtc) |
7868 | void intel_crtc_restore_mode(struct drm_crtc *crtc) |
7869 | { |
7869 | { |
7870 | intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); |
7870 | intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); |
7871 | } |
7871 | } |
7872 | 7872 | ||
7873 | #undef for_each_intel_crtc_masked |
7873 | #undef for_each_intel_crtc_masked |
7874 | 7874 | ||
7875 | static void intel_set_config_free(struct intel_set_config *config) |
7875 | static void intel_set_config_free(struct intel_set_config *config) |
7876 | { |
7876 | { |
7877 | if (!config) |
7877 | if (!config) |
7878 | return; |
7878 | return; |
7879 | 7879 | ||
7880 | kfree(config->save_connector_encoders); |
7880 | kfree(config->save_connector_encoders); |
7881 | kfree(config->save_encoder_crtcs); |
7881 | kfree(config->save_encoder_crtcs); |
7882 | kfree(config); |
7882 | kfree(config); |
7883 | } |
7883 | } |
7884 | 7884 | ||
/* Snapshot the current encoder->crtc and connector->encoder routing into
 * @config so it can be restored if the set_config operation fails.
 *
 * Returns 0 on success or -ENOMEM. NOTE(review): on the second allocation
 * failing, the first array is left allocated — presumably the caller
 * releases it via intel_set_config_free(); verify against the caller. */
static int intel_set_config_save_state(struct drm_device *dev,
				       struct intel_set_config *config)
{
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	int count;

	config->save_encoder_crtcs =
		kcalloc(dev->mode_config.num_encoder,
			sizeof(struct drm_crtc *), GFP_KERNEL);
	if (!config->save_encoder_crtcs)
		return -ENOMEM;

	config->save_connector_encoders =
		kcalloc(dev->mode_config.num_connector,
			sizeof(struct drm_encoder *), GFP_KERNEL);
	if (!config->save_connector_encoders)
		return -ENOMEM;

	/* Copy data. Note that driver private data is not affected.
	 * Should anything bad happen only the expected state is
	 * restored, not the drivers personal bookkeeping.
	 */
	count = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		config->save_encoder_crtcs[count++] = encoder->crtc;
	}

	count = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		config->save_connector_encoders[count++] = connector->encoder;
	}

	return 0;
}
7920 | 7920 | ||
/*
 * Roll back the staged modeset state: reload encoder->new_crtc and
 * connector->new_encoder from the arrays captured by
 * intel_set_config_save_state().
 *
 * Walks the encoder/connector lists in the same order as the save, so it
 * relies on neither list having changed in between — which holds because
 * save and restore happen within one set_config call.
 */
static void intel_set_config_restore_state(struct drm_device *dev,
					   struct intel_set_config *config)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int count;

	count = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->new_crtc =
			to_intel_crtc(config->save_encoder_crtcs[count++]);
	}

	count = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
		connector->new_encoder =
			to_intel_encoder(config->save_connector_encoders[count++]);
	}
}
7940 | 7940 | ||
7941 | static void |
7941 | static void |
7942 | intel_set_config_compute_mode_changes(struct drm_mode_set *set, |
7942 | intel_set_config_compute_mode_changes(struct drm_mode_set *set, |
7943 | struct intel_set_config *config) |
7943 | struct intel_set_config *config) |
7944 | { |
7944 | { |
7945 | 7945 | ||
7946 | /* We should be able to check here if the fb has the same properties |
7946 | /* We should be able to check here if the fb has the same properties |
7947 | * and then just flip_or_move it */ |
7947 | * and then just flip_or_move it */ |
7948 | if (set->crtc->fb != set->fb) { |
7948 | if (set->crtc->fb != set->fb) { |
7949 | /* If we have no fb then treat it as a full mode set */ |
7949 | /* If we have no fb then treat it as a full mode set */ |
7950 | if (set->crtc->fb == NULL) { |
7950 | if (set->crtc->fb == NULL) { |
7951 | DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); |
7951 | DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); |
7952 | config->mode_changed = true; |
7952 | config->mode_changed = true; |
7953 | } else if (set->fb == NULL) { |
7953 | } else if (set->fb == NULL) { |
7954 | config->mode_changed = true; |
7954 | config->mode_changed = true; |
7955 | } else if (set->fb->depth != set->crtc->fb->depth) { |
7955 | } else if (set->fb->depth != set->crtc->fb->depth) { |
7956 | config->mode_changed = true; |
7956 | config->mode_changed = true; |
7957 | } else if (set->fb->bits_per_pixel != |
7957 | } else if (set->fb->bits_per_pixel != |
7958 | set->crtc->fb->bits_per_pixel) { |
7958 | set->crtc->fb->bits_per_pixel) { |
7959 | config->mode_changed = true; |
7959 | config->mode_changed = true; |
7960 | } else |
7960 | } else |
7961 | config->fb_changed = true; |
7961 | config->fb_changed = true; |
7962 | } |
7962 | } |
7963 | 7963 | ||
7964 | if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y)) |
7964 | if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y)) |
7965 | config->fb_changed = true; |
7965 | config->fb_changed = true; |
7966 | 7966 | ||
7967 | if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { |
7967 | if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { |
7968 | DRM_DEBUG_KMS("modes are different, full mode set\n"); |
7968 | DRM_DEBUG_KMS("modes are different, full mode set\n"); |
7969 | drm_mode_debug_printmodeline(&set->crtc->mode); |
7969 | drm_mode_debug_printmodeline(&set->crtc->mode); |
7970 | drm_mode_debug_printmodeline(set->mode); |
7970 | drm_mode_debug_printmodeline(set->mode); |
7971 | config->mode_changed = true; |
7971 | config->mode_changed = true; |
7972 | } |
7972 | } |
7973 | } |
7973 | } |
7974 | 7974 | ||
7975 | static int |
7975 | static int |
7976 | intel_modeset_stage_output_state(struct drm_device *dev, |
7976 | intel_modeset_stage_output_state(struct drm_device *dev, |
7977 | struct drm_mode_set *set, |
7977 | struct drm_mode_set *set, |
7978 | struct intel_set_config *config) |
7978 | struct intel_set_config *config) |
7979 | { |
7979 | { |
7980 | struct drm_crtc *new_crtc; |
7980 | struct drm_crtc *new_crtc; |
7981 | struct intel_connector *connector; |
7981 | struct intel_connector *connector; |
7982 | struct intel_encoder *encoder; |
7982 | struct intel_encoder *encoder; |
7983 | int count, ro; |
7983 | int count, ro; |
7984 | 7984 | ||
7985 | /* The upper layers ensure that we either disable a crtc or have a list |
7985 | /* The upper layers ensure that we either disable a crtc or have a list |
7986 | * of connectors. For paranoia, double-check this. */ |
7986 | * of connectors. For paranoia, double-check this. */ |
7987 | WARN_ON(!set->fb && (set->num_connectors != 0)); |
7987 | WARN_ON(!set->fb && (set->num_connectors != 0)); |
7988 | WARN_ON(set->fb && (set->num_connectors == 0)); |
7988 | WARN_ON(set->fb && (set->num_connectors == 0)); |
7989 | 7989 | ||
7990 | count = 0; |
7990 | count = 0; |
7991 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
7991 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
7992 | base.head) { |
7992 | base.head) { |
7993 | /* Otherwise traverse passed in connector list and get encoders |
7993 | /* Otherwise traverse passed in connector list and get encoders |
7994 | * for them. */ |
7994 | * for them. */ |
7995 | for (ro = 0; ro < set->num_connectors; ro++) { |
7995 | for (ro = 0; ro < set->num_connectors; ro++) { |
7996 | if (set->connectors[ro] == &connector->base) { |
7996 | if (set->connectors[ro] == &connector->base) { |
7997 | connector->new_encoder = connector->encoder; |
7997 | connector->new_encoder = connector->encoder; |
7998 | break; |
7998 | break; |
7999 | } |
7999 | } |
8000 | } |
8000 | } |
8001 | 8001 | ||
8002 | /* If we disable the crtc, disable all its connectors. Also, if |
8002 | /* If we disable the crtc, disable all its connectors. Also, if |
8003 | * the connector is on the changing crtc but not on the new |
8003 | * the connector is on the changing crtc but not on the new |
8004 | * connector list, disable it. */ |
8004 | * connector list, disable it. */ |
8005 | if ((!set->fb || ro == set->num_connectors) && |
8005 | if ((!set->fb || ro == set->num_connectors) && |
8006 | connector->base.encoder && |
8006 | connector->base.encoder && |
8007 | connector->base.encoder->crtc == set->crtc) { |
8007 | connector->base.encoder->crtc == set->crtc) { |
8008 | connector->new_encoder = NULL; |
8008 | connector->new_encoder = NULL; |
8009 | 8009 | ||
8010 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", |
8010 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", |
8011 | connector->base.base.id, |
8011 | connector->base.base.id, |
8012 | drm_get_connector_name(&connector->base)); |
8012 | drm_get_connector_name(&connector->base)); |
8013 | } |
8013 | } |
8014 | 8014 | ||
8015 | 8015 | ||
8016 | if (&connector->new_encoder->base != connector->base.encoder) { |
8016 | if (&connector->new_encoder->base != connector->base.encoder) { |
8017 | DRM_DEBUG_KMS("encoder changed, full mode switch\n"); |
8017 | DRM_DEBUG_KMS("encoder changed, full mode switch\n"); |
8018 | config->mode_changed = true; |
8018 | config->mode_changed = true; |
8019 | } |
8019 | } |
8020 | } |
8020 | } |
8021 | /* connector->new_encoder is now updated for all connectors. */ |
8021 | /* connector->new_encoder is now updated for all connectors. */ |
8022 | 8022 | ||
8023 | /* Update crtc of enabled connectors. */ |
8023 | /* Update crtc of enabled connectors. */ |
8024 | count = 0; |
8024 | count = 0; |
8025 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
8025 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
8026 | base.head) { |
8026 | base.head) { |
8027 | if (!connector->new_encoder) |
8027 | if (!connector->new_encoder) |
8028 | continue; |
8028 | continue; |
8029 | 8029 | ||
8030 | new_crtc = connector->new_encoder->base.crtc; |
8030 | new_crtc = connector->new_encoder->base.crtc; |
8031 | 8031 | ||
8032 | for (ro = 0; ro < set->num_connectors; ro++) { |
8032 | for (ro = 0; ro < set->num_connectors; ro++) { |
8033 | if (set->connectors[ro] == &connector->base) |
8033 | if (set->connectors[ro] == &connector->base) |
8034 | new_crtc = set->crtc; |
8034 | new_crtc = set->crtc; |
8035 | } |
8035 | } |
8036 | 8036 | ||
8037 | /* Make sure the new CRTC will work with the encoder */ |
8037 | /* Make sure the new CRTC will work with the encoder */ |
8038 | if (!intel_encoder_crtc_ok(&connector->new_encoder->base, |
8038 | if (!intel_encoder_crtc_ok(&connector->new_encoder->base, |
8039 | new_crtc)) { |
8039 | new_crtc)) { |
8040 | return -EINVAL; |
8040 | return -EINVAL; |
8041 | } |
8041 | } |
8042 | connector->encoder->new_crtc = to_intel_crtc(new_crtc); |
8042 | connector->encoder->new_crtc = to_intel_crtc(new_crtc); |
8043 | 8043 | ||
8044 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", |
8044 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", |
8045 | connector->base.base.id, |
8045 | connector->base.base.id, |
8046 | drm_get_connector_name(&connector->base), |
8046 | drm_get_connector_name(&connector->base), |
8047 | new_crtc->base.id); |
8047 | new_crtc->base.id); |
8048 | } |
8048 | } |
8049 | 8049 | ||
8050 | /* Check for any encoders that needs to be disabled. */ |
8050 | /* Check for any encoders that needs to be disabled. */ |
8051 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
8051 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
8052 | base.head) { |
8052 | base.head) { |
8053 | list_for_each_entry(connector, |
8053 | list_for_each_entry(connector, |
8054 | &dev->mode_config.connector_list, |
8054 | &dev->mode_config.connector_list, |
8055 | base.head) { |
8055 | base.head) { |
8056 | if (connector->new_encoder == encoder) { |
8056 | if (connector->new_encoder == encoder) { |
8057 | WARN_ON(!connector->new_encoder->new_crtc); |
8057 | WARN_ON(!connector->new_encoder->new_crtc); |
8058 | 8058 | ||
8059 | goto next_encoder; |
8059 | goto next_encoder; |
8060 | } |
8060 | } |
8061 | } |
8061 | } |
8062 | encoder->new_crtc = NULL; |
8062 | encoder->new_crtc = NULL; |
8063 | next_encoder: |
8063 | next_encoder: |
8064 | /* Only now check for crtc changes so we don't miss encoders |
8064 | /* Only now check for crtc changes so we don't miss encoders |
8065 | * that will be disabled. */ |
8065 | * that will be disabled. */ |
8066 | if (&encoder->new_crtc->base != encoder->base.crtc) { |
8066 | if (&encoder->new_crtc->base != encoder->base.crtc) { |
8067 | DRM_DEBUG_KMS("crtc changed, full mode switch\n"); |
8067 | DRM_DEBUG_KMS("crtc changed, full mode switch\n"); |
8068 | config->mode_changed = true; |
8068 | config->mode_changed = true; |
8069 | } |
8069 | } |
8070 | } |
8070 | } |
8071 | /* Now we've also updated encoder->new_crtc for all encoders. */ |
8071 | /* Now we've also updated encoder->new_crtc for all encoders. */ |
8072 | 8072 | ||
8073 | return 0; |
8073 | return 0; |
8074 | } |
8074 | } |
8075 | 8075 | ||
/*
 * drm_crtc_funcs.set_config entry point: apply a userspace-requested
 * configuration (crtc + mode + fb + connector list). On any failure the
 * previously saved routing and mode are restored, so the display never
 * ends up in a half-applied state.
 */
static int intel_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct drm_mode_set save_set;
	struct intel_set_config *config;
	int ret;

	BUG_ON(!set);
	BUG_ON(!set->crtc);
	BUG_ON(!set->crtc->helper_private);

	/* Enforce sane interface api - has been abused by the fb helper. */
	BUG_ON(!set->mode && set->fb);
	BUG_ON(set->fb && set->num_connectors == 0);

	if (set->fb) {
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
				set->crtc->base.id, set->fb->base.id,
				(int)set->num_connectors, set->x, set->y);
	} else {
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
	}

	dev = set->crtc->dev;

	ret = -ENOMEM;
	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		goto out_config;
	/* NOTE(review): the !config path reaches intel_set_config_free(config)
	 * with config == NULL — presumably that helper tolerates NULL
	 * (kfree-style); confirm. */

	/* Snapshot current encoder/connector routing for rollback. */
	ret = intel_set_config_save_state(dev, config);
	if (ret)
		goto out_config;

	/* Snapshot the current crtc mode/fb/pan so a failed modeset can be
	 * re-applied below under the `fail:` label. */
	save_set.crtc = set->crtc;
	save_set.mode = &set->crtc->mode;
	save_set.x = set->crtc->x;
	save_set.y = set->crtc->y;
	save_set.fb = set->crtc->fb;

	/* Compute whether we need a full modeset, only an fb base update or no
	 * change at all. In the future we might also check whether only the
	 * mode changed, e.g. for LVDS where we only change the panel fitter in
	 * such cases. */
	intel_set_config_compute_mode_changes(set, config);

	ret = intel_modeset_stage_output_state(dev, set, config);
	if (ret)
		goto fail;

	if (config->mode_changed) {
		if (set->mode) {
			DRM_DEBUG_KMS("attempting to set mode from"
					" userspace\n");
			drm_mode_debug_printmodeline(set->mode);
		}

		ret = intel_set_mode(set->crtc, set->mode,
				     set->x, set->y, set->fb);
		if (ret) {
			DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
				  set->crtc->base.id, ret);
			goto fail;
		}
	} else if (config->fb_changed) {
		/* Same mode/routing; only the scanout base or pan changed. */
		ret = intel_pipe_set_base(set->crtc,
					  set->x, set->y, set->fb);
	}

	intel_set_config_free(config);

	return 0;

fail:
	/* Roll back the staged new_encoder/new_crtc pointers first... */
	intel_set_config_restore_state(dev, config);

	/* Try to restore the config */
	if (config->mode_changed &&
	    intel_set_mode(save_set.crtc, save_set.mode,
			   save_set.x, save_set.y, save_set.fb))
		DRM_ERROR("failed to restore config after modeset failure\n");

out_config:
	intel_set_config_free(config);
	return ret;
}
8162 | 8162 | ||
/* drm core crtc vtable for i915. Cursor and page-flip hooks are disabled
 * in this port (left commented out below). */
static const struct drm_crtc_funcs intel_crtc_funcs = {
//	.cursor_set = intel_crtc_cursor_set,
//	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = intel_crtc_set_config,
	.destroy = intel_crtc_destroy,
//	.page_flip = intel_crtc_page_flip,
};
8171 | 8171 | ||
/* Initialise the CPU-side display PLLs; these only exist on DDI-capable
 * (Haswell-class) hardware, so this is a no-op everywhere else. */
static void intel_cpu_pll_init(struct drm_device *dev)
{
	if (!HAS_DDI(dev))
		return;

	intel_ddi_pll_init(dev);
}
8177 | 8177 | ||
/*
 * Fill in the MMIO register offsets (DPLL control plus the two divisor
 * registers) for each PCH-side shared PLL advertised by dev_priv.
 * Hardware without PCH PLLs (num_pch_pll == 0) is skipped entirely.
 */
static void intel_pch_pll_init(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if (dev_priv->num_pch_pll == 0) {
		DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
		return;
	}

	for (i = 0; i < dev_priv->num_pch_pll; i++) {
		dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
		dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
		dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
	}
}
8194 | 8194 | ||
/*
 * Allocate and register the intel_crtc for the given pipe: set up the
 * identity gamma LUT, establish the pipe/plane/transcoder numbering
 * (swapping planes on mobile gen3 for FBC) and record the crtc in the
 * dev_priv lookup tables.
 *
 * Silently returns on allocation failure — callers apparently tolerate a
 * missing crtc; confirm against the init path.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	/* Trailing space for INTELFB_CONN_LIMIT connector pointers is
	 * allocated together with the crtc in one block. */
	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Identity gamma ramp as the default. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	intel_crtc->cpu_transcoder = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* The mapping slots must be in range and not already claimed. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
}
8232 | 8232 | ||
8233 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
8233 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
8234 | struct drm_file *file) |
8234 | struct drm_file *file) |
8235 | { |
8235 | { |
8236 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; |
8236 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; |
8237 | struct drm_mode_object *drmmode_obj; |
8237 | struct drm_mode_object *drmmode_obj; |
8238 | struct intel_crtc *crtc; |
8238 | struct intel_crtc *crtc; |
- | 8239 | ||
- | 8240 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
|
- | 8241 | return -ENODEV; |
|
8239 | 8242 | ||
8240 | drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, |
8243 | drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, |
8241 | DRM_MODE_OBJECT_CRTC); |
8244 | DRM_MODE_OBJECT_CRTC); |
8242 | 8245 | ||
8243 | if (!drmmode_obj) { |
8246 | if (!drmmode_obj) { |
8244 | DRM_ERROR("no such CRTC id\n"); |
8247 | DRM_ERROR("no such CRTC id\n"); |
8245 | return -EINVAL; |
8248 | return -EINVAL; |
8246 | } |
8249 | } |
8247 | 8250 | ||
8248 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); |
8251 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); |
8249 | pipe_from_crtc_id->pipe = crtc->pipe; |
8252 | pipe_from_crtc_id->pipe = crtc->pipe; |
8250 | 8253 | ||
8251 | return 0; |
8254 | return 0; |
8252 | } |
8255 | } |
8253 | 8256 | ||
8254 | static int intel_encoder_clones(struct intel_encoder *encoder) |
8257 | static int intel_encoder_clones(struct intel_encoder *encoder) |
8255 | { |
8258 | { |
8256 | struct drm_device *dev = encoder->base.dev; |
8259 | struct drm_device *dev = encoder->base.dev; |
8257 | struct intel_encoder *source_encoder; |
8260 | struct intel_encoder *source_encoder; |
8258 | int index_mask = 0; |
8261 | int index_mask = 0; |
8259 | int entry = 0; |
8262 | int entry = 0; |
8260 | 8263 | ||
8261 | list_for_each_entry(source_encoder, |
8264 | list_for_each_entry(source_encoder, |
8262 | &dev->mode_config.encoder_list, base.head) { |
8265 | &dev->mode_config.encoder_list, base.head) { |
8263 | 8266 | ||
8264 | if (encoder == source_encoder) |
8267 | if (encoder == source_encoder) |
8265 | index_mask |= (1 << entry); |
8268 | index_mask |= (1 << entry); |
8266 | 8269 | ||
8267 | /* Intel hw has only one MUX where enocoders could be cloned. */ |
8270 | /* Intel hw has only one MUX where enocoders could be cloned. */ |
8268 | if (encoder->cloneable && source_encoder->cloneable) |
8271 | if (encoder->cloneable && source_encoder->cloneable) |
8269 | index_mask |= (1 << entry); |
8272 | index_mask |= (1 << entry); |
8270 | 8273 | ||
8271 | entry++; |
8274 | entry++; |
8272 | } |
8275 | } |
8273 | 8276 | ||
8274 | return index_mask; |
8277 | return index_mask; |
8275 | } |
8278 | } |
8276 | 8279 | ||
8277 | static bool has_edp_a(struct drm_device *dev) |
8280 | static bool has_edp_a(struct drm_device *dev) |
8278 | { |
8281 | { |
8279 | struct drm_i915_private *dev_priv = dev->dev_private; |
8282 | struct drm_i915_private *dev_priv = dev->dev_private; |
8280 | 8283 | ||
8281 | if (!IS_MOBILE(dev)) |
8284 | if (!IS_MOBILE(dev)) |
8282 | return false; |
8285 | return false; |
8283 | 8286 | ||
8284 | if ((I915_READ(DP_A) & DP_DETECTED) == 0) |
8287 | if ((I915_READ(DP_A) & DP_DETECTED) == 0) |
8285 | return false; |
8288 | return false; |
8286 | 8289 | ||
8287 | if (IS_GEN5(dev) && |
8290 | if (IS_GEN5(dev) && |
8288 | (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) |
8291 | (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) |
8289 | return false; |
8292 | return false; |
8290 | 8293 | ||
8291 | return true; |
8294 | return true; |
8292 | } |
8295 | } |
8293 | 8296 | ||
8294 | static void intel_setup_outputs(struct drm_device *dev) |
8297 | static void intel_setup_outputs(struct drm_device *dev) |
8295 | { |
8298 | { |
8296 | struct drm_i915_private *dev_priv = dev->dev_private; |
8299 | struct drm_i915_private *dev_priv = dev->dev_private; |
8297 | struct intel_encoder *encoder; |
8300 | struct intel_encoder *encoder; |
8298 | bool dpd_is_edp = false; |
8301 | bool dpd_is_edp = false; |
8299 | bool has_lvds; |
8302 | bool has_lvds; |
8300 | 8303 | ||
8301 | has_lvds = intel_lvds_init(dev); |
8304 | has_lvds = intel_lvds_init(dev); |
8302 | if (!has_lvds && !HAS_PCH_SPLIT(dev)) { |
8305 | if (!has_lvds && !HAS_PCH_SPLIT(dev)) { |
8303 | /* disable the panel fitter on everything but LVDS */ |
8306 | /* disable the panel fitter on everything but LVDS */ |
8304 | I915_WRITE(PFIT_CONTROL, 0); |
8307 | I915_WRITE(PFIT_CONTROL, 0); |
8305 | } |
8308 | } |
8306 | 8309 | ||
8307 | if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) |
8310 | if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) |
8308 | intel_crt_init(dev); |
8311 | intel_crt_init(dev); |
8309 | 8312 | ||
8310 | if (HAS_DDI(dev)) { |
8313 | if (HAS_DDI(dev)) { |
8311 | int found; |
8314 | int found; |
8312 | 8315 | ||
8313 | /* Haswell uses DDI functions to detect digital outputs */ |
8316 | /* Haswell uses DDI functions to detect digital outputs */ |
8314 | found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; |
8317 | found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; |
8315 | /* DDI A only supports eDP */ |
8318 | /* DDI A only supports eDP */ |
8316 | if (found) |
8319 | if (found) |
8317 | intel_ddi_init(dev, PORT_A); |
8320 | intel_ddi_init(dev, PORT_A); |
8318 | 8321 | ||
8319 | /* DDI B, C and D detection is indicated by the SFUSE_STRAP |
8322 | /* DDI B, C and D detection is indicated by the SFUSE_STRAP |
8320 | * register */ |
8323 | * register */ |
8321 | found = I915_READ(SFUSE_STRAP); |
8324 | found = I915_READ(SFUSE_STRAP); |
8322 | 8325 | ||
8323 | if (found & SFUSE_STRAP_DDIB_DETECTED) |
8326 | if (found & SFUSE_STRAP_DDIB_DETECTED) |
8324 | intel_ddi_init(dev, PORT_B); |
8327 | intel_ddi_init(dev, PORT_B); |
8325 | if (found & SFUSE_STRAP_DDIC_DETECTED) |
8328 | if (found & SFUSE_STRAP_DDIC_DETECTED) |
8326 | intel_ddi_init(dev, PORT_C); |
8329 | intel_ddi_init(dev, PORT_C); |
8327 | if (found & SFUSE_STRAP_DDID_DETECTED) |
8330 | if (found & SFUSE_STRAP_DDID_DETECTED) |
8328 | intel_ddi_init(dev, PORT_D); |
8331 | intel_ddi_init(dev, PORT_D); |
8329 | } else if (HAS_PCH_SPLIT(dev)) { |
8332 | } else if (HAS_PCH_SPLIT(dev)) { |
8330 | int found; |
8333 | int found; |
8331 | dpd_is_edp = intel_dpd_is_edp(dev); |
8334 | dpd_is_edp = intel_dpd_is_edp(dev); |
8332 | 8335 | ||
8333 | if (has_edp_a(dev)) |
8336 | if (has_edp_a(dev)) |
8334 | intel_dp_init(dev, DP_A, PORT_A); |
8337 | intel_dp_init(dev, DP_A, PORT_A); |
8335 | 8338 | ||
8336 | if (I915_READ(HDMIB) & PORT_DETECTED) { |
8339 | if (I915_READ(HDMIB) & PORT_DETECTED) { |
8337 | /* PCH SDVOB multiplex with HDMIB */ |
8340 | /* PCH SDVOB multiplex with HDMIB */ |
8338 | found = intel_sdvo_init(dev, PCH_SDVOB, true); |
8341 | found = intel_sdvo_init(dev, PCH_SDVOB, true); |
8339 | if (!found) |
8342 | if (!found) |
8340 | intel_hdmi_init(dev, HDMIB, PORT_B); |
8343 | intel_hdmi_init(dev, HDMIB, PORT_B); |
8341 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) |
8344 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) |
8342 | intel_dp_init(dev, PCH_DP_B, PORT_B); |
8345 | intel_dp_init(dev, PCH_DP_B, PORT_B); |
8343 | } |
8346 | } |
8344 | 8347 | ||
8345 | if (I915_READ(HDMIC) & PORT_DETECTED) |
8348 | if (I915_READ(HDMIC) & PORT_DETECTED) |
8346 | intel_hdmi_init(dev, HDMIC, PORT_C); |
8349 | intel_hdmi_init(dev, HDMIC, PORT_C); |
8347 | 8350 | ||
8348 | if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) |
8351 | if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) |
8349 | intel_hdmi_init(dev, HDMID, PORT_D); |
8352 | intel_hdmi_init(dev, HDMID, PORT_D); |
8350 | 8353 | ||
8351 | if (I915_READ(PCH_DP_C) & DP_DETECTED) |
8354 | if (I915_READ(PCH_DP_C) & DP_DETECTED) |
8352 | intel_dp_init(dev, PCH_DP_C, PORT_C); |
8355 | intel_dp_init(dev, PCH_DP_C, PORT_C); |
8353 | 8356 | ||
8354 | if (I915_READ(PCH_DP_D) & DP_DETECTED) |
8357 | if (I915_READ(PCH_DP_D) & DP_DETECTED) |
8355 | intel_dp_init(dev, PCH_DP_D, PORT_D); |
8358 | intel_dp_init(dev, PCH_DP_D, PORT_D); |
8356 | } else if (IS_VALLEYVIEW(dev)) { |
8359 | } else if (IS_VALLEYVIEW(dev)) { |
8357 | /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ |
8360 | /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ |
8358 | if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) |
8361 | if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) |
8359 | intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); |
8362 | intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); |
8360 | 8363 | ||
8361 | if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) { |
8364 | if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) { |
8362 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B); |
8365 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B); |
8363 | if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) |
8366 | if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) |
8364 | intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); |
8367 | intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); |
8365 | } |
8368 | } |
8366 | 8369 | ||
8367 | if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED) |
8370 | if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED) |
8368 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C); |
8371 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C); |
8369 | 8372 | ||
8370 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
8373 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
8371 | bool found = false; |
8374 | bool found = false; |
8372 | 8375 | ||
8373 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
8376 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
8374 | DRM_DEBUG_KMS("probing SDVOB\n"); |
8377 | DRM_DEBUG_KMS("probing SDVOB\n"); |
8375 | found = intel_sdvo_init(dev, SDVOB, true); |
8378 | found = intel_sdvo_init(dev, SDVOB, true); |
8376 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { |
8379 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { |
8377 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); |
8380 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); |
8378 | intel_hdmi_init(dev, SDVOB, PORT_B); |
8381 | intel_hdmi_init(dev, SDVOB, PORT_B); |
8379 | } |
8382 | } |
8380 | 8383 | ||
8381 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) { |
8384 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) { |
8382 | DRM_DEBUG_KMS("probing DP_B\n"); |
8385 | DRM_DEBUG_KMS("probing DP_B\n"); |
8383 | intel_dp_init(dev, DP_B, PORT_B); |
8386 | intel_dp_init(dev, DP_B, PORT_B); |
8384 | } |
8387 | } |
8385 | } |
8388 | } |
8386 | 8389 | ||
8387 | /* Before G4X SDVOC doesn't have its own detect register */ |
8390 | /* Before G4X SDVOC doesn't have its own detect register */ |
8388 | 8391 | ||
8389 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
8392 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
8390 | DRM_DEBUG_KMS("probing SDVOC\n"); |
8393 | DRM_DEBUG_KMS("probing SDVOC\n"); |
8391 | found = intel_sdvo_init(dev, SDVOC, false); |
8394 | found = intel_sdvo_init(dev, SDVOC, false); |
8392 | } |
8395 | } |
8393 | 8396 | ||
8394 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { |
8397 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { |
8395 | 8398 | ||
8396 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { |
8399 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { |
8397 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); |
8400 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); |
8398 | intel_hdmi_init(dev, SDVOC, PORT_C); |
8401 | intel_hdmi_init(dev, SDVOC, PORT_C); |
8399 | } |
8402 | } |
8400 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
8403 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
8401 | DRM_DEBUG_KMS("probing DP_C\n"); |
8404 | DRM_DEBUG_KMS("probing DP_C\n"); |
8402 | intel_dp_init(dev, DP_C, PORT_C); |
8405 | intel_dp_init(dev, DP_C, PORT_C); |
8403 | } |
8406 | } |
8404 | } |
8407 | } |
8405 | 8408 | ||
8406 | if (SUPPORTS_INTEGRATED_DP(dev) && |
8409 | if (SUPPORTS_INTEGRATED_DP(dev) && |
8407 | (I915_READ(DP_D) & DP_DETECTED)) { |
8410 | (I915_READ(DP_D) & DP_DETECTED)) { |
8408 | DRM_DEBUG_KMS("probing DP_D\n"); |
8411 | DRM_DEBUG_KMS("probing DP_D\n"); |
8409 | intel_dp_init(dev, DP_D, PORT_D); |
8412 | intel_dp_init(dev, DP_D, PORT_D); |
8410 | } |
8413 | } |
8411 | } else if (IS_GEN2(dev)) |
8414 | } else if (IS_GEN2(dev)) |
8412 | intel_dvo_init(dev); |
8415 | intel_dvo_init(dev); |
8413 | 8416 | ||
8414 | // if (SUPPORTS_TV(dev)) |
8417 | // if (SUPPORTS_TV(dev)) |
8415 | // intel_tv_init(dev); |
8418 | // intel_tv_init(dev); |
8416 | 8419 | ||
8417 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
8420 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
8418 | encoder->base.possible_crtcs = encoder->crtc_mask; |
8421 | encoder->base.possible_crtcs = encoder->crtc_mask; |
8419 | encoder->base.possible_clones = |
8422 | encoder->base.possible_clones = |
8420 | intel_encoder_clones(encoder); |
8423 | intel_encoder_clones(encoder); |
8421 | } |
8424 | } |
8422 | 8425 | ||
8423 | intel_init_pch_refclk(dev); |
8426 | intel_init_pch_refclk(dev); |
8424 | 8427 | ||
8425 | drm_helper_move_panel_connectors_to_head(dev); |
8428 | drm_helper_move_panel_connectors_to_head(dev); |
8426 | } |
8429 | } |
8427 | 8430 | ||
8428 | 8431 | ||
8429 | 8432 | ||
8430 | static const struct drm_framebuffer_funcs intel_fb_funcs = { |
8433 | static const struct drm_framebuffer_funcs intel_fb_funcs = { |
8431 | // .destroy = intel_user_framebuffer_destroy, |
8434 | // .destroy = intel_user_framebuffer_destroy, |
8432 | // .create_handle = intel_user_framebuffer_create_handle, |
8435 | // .create_handle = intel_user_framebuffer_create_handle, |
8433 | }; |
8436 | }; |
8434 | 8437 | ||
8435 | int intel_framebuffer_init(struct drm_device *dev, |
8438 | int intel_framebuffer_init(struct drm_device *dev, |
8436 | struct intel_framebuffer *intel_fb, |
8439 | struct intel_framebuffer *intel_fb, |
8437 | struct drm_mode_fb_cmd2 *mode_cmd, |
8440 | struct drm_mode_fb_cmd2 *mode_cmd, |
8438 | struct drm_i915_gem_object *obj) |
8441 | struct drm_i915_gem_object *obj) |
8439 | { |
8442 | { |
8440 | int ret; |
8443 | int ret; |
8441 | 8444 | ||
8442 | if (obj->tiling_mode == I915_TILING_Y) { |
8445 | if (obj->tiling_mode == I915_TILING_Y) { |
8443 | DRM_DEBUG("hardware does not support tiling Y\n"); |
8446 | DRM_DEBUG("hardware does not support tiling Y\n"); |
8444 | return -EINVAL; |
8447 | return -EINVAL; |
8445 | } |
8448 | } |
8446 | 8449 | ||
8447 | if (mode_cmd->pitches[0] & 63) { |
8450 | if (mode_cmd->pitches[0] & 63) { |
8448 | DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", |
8451 | DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", |
8449 | mode_cmd->pitches[0]); |
8452 | mode_cmd->pitches[0]); |
8450 | return -EINVAL; |
8453 | return -EINVAL; |
8451 | } |
8454 | } |
8452 | 8455 | ||
8453 | /* FIXME <= Gen4 stride limits are bit unclear */ |
8456 | /* FIXME <= Gen4 stride limits are bit unclear */ |
8454 | if (mode_cmd->pitches[0] > 32768) { |
8457 | if (mode_cmd->pitches[0] > 32768) { |
8455 | DRM_DEBUG("pitch (%d) must be at less than 32768\n", |
8458 | DRM_DEBUG("pitch (%d) must be at less than 32768\n", |
8456 | mode_cmd->pitches[0]); |
8459 | mode_cmd->pitches[0]); |
8457 | return -EINVAL; |
8460 | return -EINVAL; |
8458 | } |
8461 | } |
8459 | 8462 | ||
8460 | if (obj->tiling_mode != I915_TILING_NONE && |
8463 | if (obj->tiling_mode != I915_TILING_NONE && |
8461 | mode_cmd->pitches[0] != obj->stride) { |
8464 | mode_cmd->pitches[0] != obj->stride) { |
8462 | DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", |
8465 | DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", |
8463 | mode_cmd->pitches[0], obj->stride); |
8466 | mode_cmd->pitches[0], obj->stride); |
8464 | return -EINVAL; |
8467 | return -EINVAL; |
8465 | } |
8468 | } |
8466 | 8469 | ||
8467 | /* Reject formats not supported by any plane early. */ |
8470 | /* Reject formats not supported by any plane early. */ |
8468 | switch (mode_cmd->pixel_format) { |
8471 | switch (mode_cmd->pixel_format) { |
8469 | case DRM_FORMAT_C8: |
8472 | case DRM_FORMAT_C8: |
8470 | case DRM_FORMAT_RGB565: |
8473 | case DRM_FORMAT_RGB565: |
8471 | case DRM_FORMAT_XRGB8888: |
8474 | case DRM_FORMAT_XRGB8888: |
8472 | case DRM_FORMAT_ARGB8888: |
8475 | case DRM_FORMAT_ARGB8888: |
8473 | break; |
8476 | break; |
8474 | case DRM_FORMAT_XRGB1555: |
8477 | case DRM_FORMAT_XRGB1555: |
8475 | case DRM_FORMAT_ARGB1555: |
8478 | case DRM_FORMAT_ARGB1555: |
8476 | if (INTEL_INFO(dev)->gen > 3) { |
8479 | if (INTEL_INFO(dev)->gen > 3) { |
8477 | DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); |
8480 | DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); |
8478 | return -EINVAL; |
8481 | return -EINVAL; |
8479 | } |
8482 | } |
8480 | break; |
8483 | break; |
8481 | case DRM_FORMAT_XBGR8888: |
8484 | case DRM_FORMAT_XBGR8888: |
8482 | case DRM_FORMAT_ABGR8888: |
8485 | case DRM_FORMAT_ABGR8888: |
8483 | case DRM_FORMAT_XRGB2101010: |
8486 | case DRM_FORMAT_XRGB2101010: |
8484 | case DRM_FORMAT_ARGB2101010: |
8487 | case DRM_FORMAT_ARGB2101010: |
8485 | case DRM_FORMAT_XBGR2101010: |
8488 | case DRM_FORMAT_XBGR2101010: |
8486 | case DRM_FORMAT_ABGR2101010: |
8489 | case DRM_FORMAT_ABGR2101010: |
8487 | if (INTEL_INFO(dev)->gen < 4) { |
8490 | if (INTEL_INFO(dev)->gen < 4) { |
8488 | DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); |
8491 | DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); |
8489 | return -EINVAL; |
8492 | return -EINVAL; |
8490 | } |
8493 | } |
8491 | break; |
8494 | break; |
8492 | case DRM_FORMAT_YUYV: |
8495 | case DRM_FORMAT_YUYV: |
8493 | case DRM_FORMAT_UYVY: |
8496 | case DRM_FORMAT_UYVY: |
8494 | case DRM_FORMAT_YVYU: |
8497 | case DRM_FORMAT_YVYU: |
8495 | case DRM_FORMAT_VYUY: |
8498 | case DRM_FORMAT_VYUY: |
8496 | if (INTEL_INFO(dev)->gen < 5) { |
8499 | if (INTEL_INFO(dev)->gen < 5) { |
8497 | DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); |
8500 | DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format); |
8498 | return -EINVAL; |
8501 | return -EINVAL; |
8499 | } |
8502 | } |
8500 | break; |
8503 | break; |
8501 | default: |
8504 | default: |
8502 | DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); |
8505 | DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format); |
8503 | return -EINVAL; |
8506 | return -EINVAL; |
8504 | } |
8507 | } |
8505 | 8508 | ||
8506 | /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ |
8509 | /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ |
8507 | if (mode_cmd->offsets[0] != 0) |
8510 | if (mode_cmd->offsets[0] != 0) |
8508 | return -EINVAL; |
8511 | return -EINVAL; |
8509 | 8512 | ||
8510 | drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); |
8513 | drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); |
8511 | intel_fb->obj = obj; |
8514 | intel_fb->obj = obj; |
8512 | 8515 | ||
8513 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
8516 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
8514 | if (ret) { |
8517 | if (ret) { |
8515 | DRM_ERROR("framebuffer init failed %d\n", ret); |
8518 | DRM_ERROR("framebuffer init failed %d\n", ret); |
8516 | return ret; |
8519 | return ret; |
8517 | } |
8520 | } |
8518 | 8521 | ||
8519 | return 0; |
8522 | return 0; |
8520 | } |
8523 | } |
8521 | 8524 | ||
8522 | 8525 | ||
8523 | static const struct drm_mode_config_funcs intel_mode_funcs = { |
8526 | static const struct drm_mode_config_funcs intel_mode_funcs = { |
8524 | .fb_create = NULL /*intel_user_framebuffer_create*/, |
8527 | .fb_create = NULL /*intel_user_framebuffer_create*/, |
8525 | .output_poll_changed = intel_fb_output_poll_changed, |
8528 | .output_poll_changed = intel_fb_output_poll_changed, |
8526 | }; |
8529 | }; |
8527 | 8530 | ||
8528 | /* Set up chip specific display functions */ |
8531 | /* Set up chip specific display functions */ |
8529 | static void intel_init_display(struct drm_device *dev) |
8532 | static void intel_init_display(struct drm_device *dev) |
8530 | { |
8533 | { |
8531 | struct drm_i915_private *dev_priv = dev->dev_private; |
8534 | struct drm_i915_private *dev_priv = dev->dev_private; |
8532 | 8535 | ||
8533 | /* We always want a DPMS function */ |
8536 | /* We always want a DPMS function */ |
8534 | if (HAS_DDI(dev)) { |
8537 | if (HAS_DDI(dev)) { |
8535 | dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; |
8538 | dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; |
8536 | dev_priv->display.crtc_enable = haswell_crtc_enable; |
8539 | dev_priv->display.crtc_enable = haswell_crtc_enable; |
8537 | dev_priv->display.crtc_disable = haswell_crtc_disable; |
8540 | dev_priv->display.crtc_disable = haswell_crtc_disable; |
8538 | dev_priv->display.off = haswell_crtc_off; |
8541 | dev_priv->display.off = haswell_crtc_off; |
8539 | dev_priv->display.update_plane = ironlake_update_plane; |
8542 | dev_priv->display.update_plane = ironlake_update_plane; |
8540 | } else if (HAS_PCH_SPLIT(dev)) { |
8543 | } else if (HAS_PCH_SPLIT(dev)) { |
8541 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; |
8544 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; |
8542 | dev_priv->display.crtc_enable = ironlake_crtc_enable; |
8545 | dev_priv->display.crtc_enable = ironlake_crtc_enable; |
8543 | dev_priv->display.crtc_disable = ironlake_crtc_disable; |
8546 | dev_priv->display.crtc_disable = ironlake_crtc_disable; |
8544 | dev_priv->display.off = ironlake_crtc_off; |
8547 | dev_priv->display.off = ironlake_crtc_off; |
8545 | dev_priv->display.update_plane = ironlake_update_plane; |
8548 | dev_priv->display.update_plane = ironlake_update_plane; |
8546 | } else { |
8549 | } else { |
8547 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; |
8550 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; |
8548 | dev_priv->display.crtc_enable = i9xx_crtc_enable; |
8551 | dev_priv->display.crtc_enable = i9xx_crtc_enable; |
8549 | dev_priv->display.crtc_disable = i9xx_crtc_disable; |
8552 | dev_priv->display.crtc_disable = i9xx_crtc_disable; |
8550 | dev_priv->display.off = i9xx_crtc_off; |
8553 | dev_priv->display.off = i9xx_crtc_off; |
8551 | dev_priv->display.update_plane = i9xx_update_plane; |
8554 | dev_priv->display.update_plane = i9xx_update_plane; |
8552 | } |
8555 | } |
8553 | 8556 | ||
8554 | /* Returns the core display clock speed */ |
8557 | /* Returns the core display clock speed */ |
8555 | if (IS_VALLEYVIEW(dev)) |
8558 | if (IS_VALLEYVIEW(dev)) |
8556 | dev_priv->display.get_display_clock_speed = |
8559 | dev_priv->display.get_display_clock_speed = |
8557 | valleyview_get_display_clock_speed; |
8560 | valleyview_get_display_clock_speed; |
8558 | else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) |
8561 | else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) |
8559 | dev_priv->display.get_display_clock_speed = |
8562 | dev_priv->display.get_display_clock_speed = |
8560 | i945_get_display_clock_speed; |
8563 | i945_get_display_clock_speed; |
8561 | else if (IS_I915G(dev)) |
8564 | else if (IS_I915G(dev)) |
8562 | dev_priv->display.get_display_clock_speed = |
8565 | dev_priv->display.get_display_clock_speed = |
8563 | i915_get_display_clock_speed; |
8566 | i915_get_display_clock_speed; |
8564 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) |
8567 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) |
8565 | dev_priv->display.get_display_clock_speed = |
8568 | dev_priv->display.get_display_clock_speed = |
8566 | i9xx_misc_get_display_clock_speed; |
8569 | i9xx_misc_get_display_clock_speed; |
8567 | else if (IS_I915GM(dev)) |
8570 | else if (IS_I915GM(dev)) |
8568 | dev_priv->display.get_display_clock_speed = |
8571 | dev_priv->display.get_display_clock_speed = |
8569 | i915gm_get_display_clock_speed; |
8572 | i915gm_get_display_clock_speed; |
8570 | else if (IS_I865G(dev)) |
8573 | else if (IS_I865G(dev)) |
8571 | dev_priv->display.get_display_clock_speed = |
8574 | dev_priv->display.get_display_clock_speed = |
8572 | i865_get_display_clock_speed; |
8575 | i865_get_display_clock_speed; |
8573 | else if (IS_I85X(dev)) |
8576 | else if (IS_I85X(dev)) |
8574 | dev_priv->display.get_display_clock_speed = |
8577 | dev_priv->display.get_display_clock_speed = |
8575 | i855_get_display_clock_speed; |
8578 | i855_get_display_clock_speed; |
8576 | else /* 852, 830 */ |
8579 | else /* 852, 830 */ |
8577 | dev_priv->display.get_display_clock_speed = |
8580 | dev_priv->display.get_display_clock_speed = |
8578 | i830_get_display_clock_speed; |
8581 | i830_get_display_clock_speed; |
8579 | 8582 | ||
8580 | if (HAS_PCH_SPLIT(dev)) { |
8583 | if (HAS_PCH_SPLIT(dev)) { |
8581 | if (IS_GEN5(dev)) { |
8584 | if (IS_GEN5(dev)) { |
8582 | dev_priv->display.fdi_link_train = ironlake_fdi_link_train; |
8585 | dev_priv->display.fdi_link_train = ironlake_fdi_link_train; |
8583 | dev_priv->display.write_eld = ironlake_write_eld; |
8586 | dev_priv->display.write_eld = ironlake_write_eld; |
8584 | } else if (IS_GEN6(dev)) { |
8587 | } else if (IS_GEN6(dev)) { |
8585 | dev_priv->display.fdi_link_train = gen6_fdi_link_train; |
8588 | dev_priv->display.fdi_link_train = gen6_fdi_link_train; |
8586 | dev_priv->display.write_eld = ironlake_write_eld; |
8589 | dev_priv->display.write_eld = ironlake_write_eld; |
8587 | } else if (IS_IVYBRIDGE(dev)) { |
8590 | } else if (IS_IVYBRIDGE(dev)) { |
8588 | /* FIXME: detect B0+ stepping and use auto training */ |
8591 | /* FIXME: detect B0+ stepping and use auto training */ |
8589 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; |
8592 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; |
8590 | dev_priv->display.write_eld = ironlake_write_eld; |
8593 | dev_priv->display.write_eld = ironlake_write_eld; |
8591 | dev_priv->display.modeset_global_resources = |
8594 | dev_priv->display.modeset_global_resources = |
8592 | ivb_modeset_global_resources; |
8595 | ivb_modeset_global_resources; |
8593 | } else if (IS_HASWELL(dev)) { |
8596 | } else if (IS_HASWELL(dev)) { |
8594 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; |
8597 | dev_priv->display.fdi_link_train = hsw_fdi_link_train; |
8595 | dev_priv->display.write_eld = haswell_write_eld; |
8598 | dev_priv->display.write_eld = haswell_write_eld; |
8596 | dev_priv->display.modeset_global_resources = |
8599 | dev_priv->display.modeset_global_resources = |
8597 | haswell_modeset_global_resources; |
8600 | haswell_modeset_global_resources; |
8598 | } |
8601 | } |
8599 | } else if (IS_G4X(dev)) { |
8602 | } else if (IS_G4X(dev)) { |
8600 | dev_priv->display.write_eld = g4x_write_eld; |
8603 | dev_priv->display.write_eld = g4x_write_eld; |
8601 | } |
8604 | } |
8602 | 8605 | ||
8603 | /* Default just returns -ENODEV to indicate unsupported */ |
8606 | /* Default just returns -ENODEV to indicate unsupported */ |
8604 | // dev_priv->display.queue_flip = intel_default_queue_flip; |
8607 | // dev_priv->display.queue_flip = intel_default_queue_flip; |
8605 | 8608 | ||
8606 | 8609 | ||
8607 | 8610 | ||
8608 | 8611 | ||
8609 | } |
8612 | } |
8610 | 8613 | ||
8611 | /* |
8614 | /* |
8612 | * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, |
8615 | * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, |
8613 | * resume, or other times. This quirk makes sure that's the case for |
8616 | * resume, or other times. This quirk makes sure that's the case for |
8614 | * affected systems. |
8617 | * affected systems. |
8615 | */ |
8618 | */ |
8616 | static void quirk_pipea_force(struct drm_device *dev) |
8619 | static void quirk_pipea_force(struct drm_device *dev) |
8617 | { |
8620 | { |
8618 | struct drm_i915_private *dev_priv = dev->dev_private; |
8621 | struct drm_i915_private *dev_priv = dev->dev_private; |
8619 | 8622 | ||
8620 | dev_priv->quirks |= QUIRK_PIPEA_FORCE; |
8623 | dev_priv->quirks |= QUIRK_PIPEA_FORCE; |
8621 | DRM_INFO("applying pipe a force quirk\n"); |
8624 | DRM_INFO("applying pipe a force quirk\n"); |
8622 | } |
8625 | } |
8623 | 8626 | ||
8624 | /* |
8627 | /* |
8625 | * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason |
8628 | * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason |
8626 | */ |
8629 | */ |
8627 | static void quirk_ssc_force_disable(struct drm_device *dev) |
8630 | static void quirk_ssc_force_disable(struct drm_device *dev) |
8628 | { |
8631 | { |
8629 | struct drm_i915_private *dev_priv = dev->dev_private; |
8632 | struct drm_i915_private *dev_priv = dev->dev_private; |
8630 | dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; |
8633 | dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; |
8631 | DRM_INFO("applying lvds SSC disable quirk\n"); |
8634 | DRM_INFO("applying lvds SSC disable quirk\n"); |
8632 | } |
8635 | } |
8633 | 8636 | ||
8634 | /* |
8637 | /* |
8635 | * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight |
8638 | * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight |
8636 | * brightness value |
8639 | * brightness value |
8637 | */ |
8640 | */ |
8638 | static void quirk_invert_brightness(struct drm_device *dev) |
8641 | static void quirk_invert_brightness(struct drm_device *dev) |
8639 | { |
8642 | { |
8640 | struct drm_i915_private *dev_priv = dev->dev_private; |
8643 | struct drm_i915_private *dev_priv = dev->dev_private; |
8641 | dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; |
8644 | dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; |
8642 | DRM_INFO("applying inverted panel brightness quirk\n"); |
8645 | DRM_INFO("applying inverted panel brightness quirk\n"); |
8643 | } |
8646 | } |
8644 | 8647 | ||
8645 | struct intel_quirk { |
8648 | struct intel_quirk { |
8646 | int device; |
8649 | int device; |
8647 | int subsystem_vendor; |
8650 | int subsystem_vendor; |
8648 | int subsystem_device; |
8651 | int subsystem_device; |
8649 | void (*hook)(struct drm_device *dev); |
8652 | void (*hook)(struct drm_device *dev); |
8650 | }; |
8653 | }; |
8651 | 8654 | ||
8652 | /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ |
8655 | /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ |
8653 | struct intel_dmi_quirk { |
8656 | struct intel_dmi_quirk { |
8654 | void (*hook)(struct drm_device *dev); |
8657 | void (*hook)(struct drm_device *dev); |
8655 | const struct dmi_system_id (*dmi_id_list)[]; |
8658 | const struct dmi_system_id (*dmi_id_list)[]; |
8656 | }; |
8659 | }; |
8657 | 8660 | ||
8658 | static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) |
8661 | static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) |
8659 | { |
8662 | { |
8660 | DRM_INFO("Backlight polarity reversed on %s\n", id->ident); |
8663 | DRM_INFO("Backlight polarity reversed on %s\n", id->ident); |
8661 | return 1; |
8664 | return 1; |
8662 | } |
8665 | } |
8663 | 8666 | ||
8664 | static const struct intel_dmi_quirk intel_dmi_quirks[] = { |
8667 | static const struct intel_dmi_quirk intel_dmi_quirks[] = { |
8665 | { |
8668 | { |
8666 | .dmi_id_list = &(const struct dmi_system_id[]) { |
8669 | .dmi_id_list = &(const struct dmi_system_id[]) { |
8667 | { |
8670 | { |
8668 | .callback = intel_dmi_reverse_brightness, |
8671 | .callback = intel_dmi_reverse_brightness, |
8669 | .ident = "NCR Corporation", |
8672 | .ident = "NCR Corporation", |
8670 | .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), |
8673 | .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), |
8671 | DMI_MATCH(DMI_PRODUCT_NAME, ""), |
8674 | DMI_MATCH(DMI_PRODUCT_NAME, ""), |
8672 | }, |
8675 | }, |
8673 | }, |
8676 | }, |
8674 | { } /* terminating entry */ |
8677 | { } /* terminating entry */ |
8675 | }, |
8678 | }, |
8676 | .hook = quirk_invert_brightness, |
8679 | .hook = quirk_invert_brightness, |
8677 | }, |
8680 | }, |
8678 | }; |
8681 | }; |
8679 | 8682 | ||
8680 | static struct intel_quirk intel_quirks[] = { |
8683 | static struct intel_quirk intel_quirks[] = { |
8681 | /* HP Mini needs pipe A force quirk (LP: #322104) */ |
8684 | /* HP Mini needs pipe A force quirk (LP: #322104) */ |
8682 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, |
8685 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, |
8683 | 8686 | ||
8684 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ |
8687 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ |
8685 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, |
8688 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, |
8686 | 8689 | ||
8687 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ |
8690 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ |
8688 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, |
8691 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, |
8689 | 8692 | ||
8690 | /* 830/845 need to leave pipe A & dpll A up */ |
8693 | /* 830/845 need to leave pipe A & dpll A up */ |
8691 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
8694 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
8692 | { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
8695 | { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
8693 | 8696 | ||
8694 | /* Lenovo U160 cannot use SSC on LVDS */ |
8697 | /* Lenovo U160 cannot use SSC on LVDS */ |
8695 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, |
8698 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, |
8696 | 8699 | ||
8697 | /* Sony Vaio Y cannot use SSC on LVDS */ |
8700 | /* Sony Vaio Y cannot use SSC on LVDS */ |
8698 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, |
8701 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, |
8699 | 8702 | ||
8700 | /* Acer Aspire 5734Z must invert backlight brightness */ |
8703 | /* Acer Aspire 5734Z must invert backlight brightness */ |
8701 | { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, |
8704 | { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, |
8702 | 8705 | ||
8703 | /* Acer/eMachines G725 */ |
8706 | /* Acer/eMachines G725 */ |
8704 | { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, |
8707 | { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, |
8705 | 8708 | ||
8706 | /* Acer/eMachines e725 */ |
8709 | /* Acer/eMachines e725 */ |
8707 | { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, |
8710 | { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, |
8708 | 8711 | ||
8709 | /* Acer/Packard Bell NCL20 */ |
8712 | /* Acer/Packard Bell NCL20 */ |
8710 | { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, |
8713 | { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, |
8711 | 8714 | ||
8712 | /* Acer Aspire 4736Z */ |
8715 | /* Acer Aspire 4736Z */ |
8713 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, |
8716 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, |
8714 | }; |
8717 | }; |
8715 | 8718 | ||
8716 | static void intel_init_quirks(struct drm_device *dev) |
8719 | static void intel_init_quirks(struct drm_device *dev) |
8717 | { |
8720 | { |
8718 | struct pci_dev *d = dev->pdev; |
8721 | struct pci_dev *d = dev->pdev; |
8719 | int i; |
8722 | int i; |
8720 | 8723 | ||
8721 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { |
8724 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { |
8722 | struct intel_quirk *q = &intel_quirks[i]; |
8725 | struct intel_quirk *q = &intel_quirks[i]; |
8723 | 8726 | ||
8724 | if (d->device == q->device && |
8727 | if (d->device == q->device && |
8725 | (d->subsystem_vendor == q->subsystem_vendor || |
8728 | (d->subsystem_vendor == q->subsystem_vendor || |
8726 | q->subsystem_vendor == PCI_ANY_ID) && |
8729 | q->subsystem_vendor == PCI_ANY_ID) && |
8727 | (d->subsystem_device == q->subsystem_device || |
8730 | (d->subsystem_device == q->subsystem_device || |
8728 | q->subsystem_device == PCI_ANY_ID)) |
8731 | q->subsystem_device == PCI_ANY_ID)) |
8729 | q->hook(dev); |
8732 | q->hook(dev); |
8730 | } |
8733 | } |
8731 | // for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { |
8734 | // for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { |
8732 | // if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) |
8735 | // if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) |
8733 | // intel_dmi_quirks[i].hook(dev); |
8736 | // intel_dmi_quirks[i].hook(dev); |
8734 | // } |
8737 | // } |
8735 | } |
8738 | } |
8736 | 8739 | ||
8737 | /* Disable the VGA plane that we never use */ |
8740 | /* Disable the VGA plane that we never use */ |
8738 | static void i915_disable_vga(struct drm_device *dev) |
8741 | static void i915_disable_vga(struct drm_device *dev) |
8739 | { |
8742 | { |
8740 | struct drm_i915_private *dev_priv = dev->dev_private; |
8743 | struct drm_i915_private *dev_priv = dev->dev_private; |
8741 | u8 sr1; |
8744 | u8 sr1; |
8742 | u32 vga_reg = i915_vgacntrl_reg(dev); |
8745 | u32 vga_reg = i915_vgacntrl_reg(dev); |
8743 | 8746 | ||
8744 | // vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); |
8747 | // vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); |
8745 | out8(SR01, VGA_SR_INDEX); |
8748 | out8(SR01, VGA_SR_INDEX); |
8746 | sr1 = in8(VGA_SR_DATA); |
8749 | sr1 = in8(VGA_SR_DATA); |
8747 | out8(sr1 | 1<<5, VGA_SR_DATA); |
8750 | out8(sr1 | 1<<5, VGA_SR_DATA); |
8748 | // vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); |
8751 | // vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); |
8749 | udelay(300); |
8752 | udelay(300); |
8750 | 8753 | ||
8751 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); |
8754 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); |
8752 | POSTING_READ(vga_reg); |
8755 | POSTING_READ(vga_reg); |
8753 | } |
8756 | } |
8754 | 8757 | ||
8755 | void intel_modeset_init_hw(struct drm_device *dev) |
8758 | void intel_modeset_init_hw(struct drm_device *dev) |
8756 | { |
8759 | { |
8757 | intel_init_power_well(dev); |
8760 | intel_init_power_well(dev); |
8758 | 8761 | ||
8759 | intel_prepare_ddi(dev); |
8762 | intel_prepare_ddi(dev); |
8760 | 8763 | ||
8761 | intel_init_clock_gating(dev); |
8764 | intel_init_clock_gating(dev); |
8762 | 8765 | ||
8763 | // mutex_lock(&dev->struct_mutex); |
8766 | mutex_lock(&dev->struct_mutex); |
8764 | // intel_enable_gt_powersave(dev); |
8767 | intel_enable_gt_powersave(dev); |
8765 | // mutex_unlock(&dev->struct_mutex); |
8768 | mutex_unlock(&dev->struct_mutex); |
8766 | } |
8769 | } |
8767 | 8770 | ||
8768 | void intel_modeset_init(struct drm_device *dev) |
8771 | void intel_modeset_init(struct drm_device *dev) |
8769 | { |
8772 | { |
8770 | struct drm_i915_private *dev_priv = dev->dev_private; |
8773 | struct drm_i915_private *dev_priv = dev->dev_private; |
8771 | int i, ret; |
8774 | int i, ret; |
8772 | 8775 | ||
8773 | drm_mode_config_init(dev); |
8776 | drm_mode_config_init(dev); |
8774 | 8777 | ||
8775 | dev->mode_config.min_width = 0; |
8778 | dev->mode_config.min_width = 0; |
8776 | dev->mode_config.min_height = 0; |
8779 | dev->mode_config.min_height = 0; |
8777 | 8780 | ||
8778 | dev->mode_config.preferred_depth = 24; |
8781 | dev->mode_config.preferred_depth = 24; |
8779 | dev->mode_config.prefer_shadow = 1; |
8782 | dev->mode_config.prefer_shadow = 1; |
8780 | 8783 | ||
8781 | dev->mode_config.funcs = &intel_mode_funcs; |
8784 | dev->mode_config.funcs = &intel_mode_funcs; |
8782 | 8785 | ||
8783 | intel_init_quirks(dev); |
8786 | intel_init_quirks(dev); |
8784 | 8787 | ||
8785 | intel_init_pm(dev); |
8788 | intel_init_pm(dev); |
8786 | 8789 | ||
8787 | intel_init_display(dev); |
8790 | intel_init_display(dev); |
8788 | 8791 | ||
8789 | if (IS_GEN2(dev)) { |
8792 | if (IS_GEN2(dev)) { |
8790 | dev->mode_config.max_width = 2048; |
8793 | dev->mode_config.max_width = 2048; |
8791 | dev->mode_config.max_height = 2048; |
8794 | dev->mode_config.max_height = 2048; |
8792 | } else if (IS_GEN3(dev)) { |
8795 | } else if (IS_GEN3(dev)) { |
8793 | dev->mode_config.max_width = 4096; |
8796 | dev->mode_config.max_width = 4096; |
8794 | dev->mode_config.max_height = 4096; |
8797 | dev->mode_config.max_height = 4096; |
8795 | } else { |
8798 | } else { |
8796 | dev->mode_config.max_width = 8192; |
8799 | dev->mode_config.max_width = 8192; |
8797 | dev->mode_config.max_height = 8192; |
8800 | dev->mode_config.max_height = 8192; |
8798 | } |
8801 | } |
8799 | dev->mode_config.fb_base = dev_priv->gtt.mappable_base; |
8802 | dev->mode_config.fb_base = dev_priv->gtt.mappable_base; |
8800 | 8803 | ||
8801 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
8804 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
8802 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); |
8805 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); |
8803 | 8806 | ||
8804 | for (i = 0; i < dev_priv->num_pipe; i++) { |
8807 | for (i = 0; i < dev_priv->num_pipe; i++) { |
8805 | intel_crtc_init(dev, i); |
8808 | intel_crtc_init(dev, i); |
8806 | ret = intel_plane_init(dev, i); |
8809 | ret = intel_plane_init(dev, i); |
8807 | if (ret) |
8810 | if (ret) |
8808 | DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); |
8811 | DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); |
8809 | } |
8812 | } |
8810 | 8813 | ||
8811 | intel_cpu_pll_init(dev); |
8814 | intel_cpu_pll_init(dev); |
8812 | intel_pch_pll_init(dev); |
8815 | intel_pch_pll_init(dev); |
8813 | 8816 | ||
8814 | /* Just disable it once at startup */ |
8817 | /* Just disable it once at startup */ |
8815 | i915_disable_vga(dev); |
8818 | i915_disable_vga(dev); |
8816 | intel_setup_outputs(dev); |
8819 | intel_setup_outputs(dev); |
8817 | 8820 | ||
8818 | /* Just in case the BIOS is doing something questionable. */ |
8821 | /* Just in case the BIOS is doing something questionable. */ |
8819 | intel_disable_fbc(dev); |
8822 | intel_disable_fbc(dev); |
8820 | } |
8823 | } |
8821 | 8824 | ||
8822 | static void |
8825 | static void |
8823 | intel_connector_break_all_links(struct intel_connector *connector) |
8826 | intel_connector_break_all_links(struct intel_connector *connector) |
8824 | { |
8827 | { |
8825 | connector->base.dpms = DRM_MODE_DPMS_OFF; |
8828 | connector->base.dpms = DRM_MODE_DPMS_OFF; |
8826 | connector->base.encoder = NULL; |
8829 | connector->base.encoder = NULL; |
8827 | connector->encoder->connectors_active = false; |
8830 | connector->encoder->connectors_active = false; |
8828 | connector->encoder->base.crtc = NULL; |
8831 | connector->encoder->base.crtc = NULL; |
8829 | } |
8832 | } |
8830 | 8833 | ||
8831 | static void intel_enable_pipe_a(struct drm_device *dev) |
8834 | static void intel_enable_pipe_a(struct drm_device *dev) |
8832 | { |
8835 | { |
8833 | struct intel_connector *connector; |
8836 | struct intel_connector *connector; |
8834 | struct drm_connector *crt = NULL; |
8837 | struct drm_connector *crt = NULL; |
8835 | struct intel_load_detect_pipe load_detect_temp; |
8838 | struct intel_load_detect_pipe load_detect_temp; |
8836 | 8839 | ||
8837 | /* We can't just switch on the pipe A, we need to set things up with a |
8840 | /* We can't just switch on the pipe A, we need to set things up with a |
8838 | * proper mode and output configuration. As a gross hack, enable pipe A |
8841 | * proper mode and output configuration. As a gross hack, enable pipe A |
8839 | * by enabling the load detect pipe once. */ |
8842 | * by enabling the load detect pipe once. */ |
8840 | list_for_each_entry(connector, |
8843 | list_for_each_entry(connector, |
8841 | &dev->mode_config.connector_list, |
8844 | &dev->mode_config.connector_list, |
8842 | base.head) { |
8845 | base.head) { |
8843 | if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { |
8846 | if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { |
8844 | crt = &connector->base; |
8847 | crt = &connector->base; |
8845 | break; |
8848 | break; |
8846 | } |
8849 | } |
8847 | } |
8850 | } |
8848 | 8851 | ||
8849 | if (!crt) |
8852 | if (!crt) |
8850 | return; |
8853 | return; |
8851 | 8854 | ||
8852 | if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp)) |
8855 | if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp)) |
8853 | intel_release_load_detect_pipe(crt, &load_detect_temp); |
8856 | intel_release_load_detect_pipe(crt, &load_detect_temp); |
8854 | 8857 | ||
8855 | 8858 | ||
8856 | } |
8859 | } |
8857 | 8860 | ||
8858 | static bool |
8861 | static bool |
8859 | intel_check_plane_mapping(struct intel_crtc *crtc) |
8862 | intel_check_plane_mapping(struct intel_crtc *crtc) |
8860 | { |
8863 | { |
8861 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
8864 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
8862 | u32 reg, val; |
8865 | u32 reg, val; |
8863 | 8866 | ||
8864 | if (dev_priv->num_pipe == 1) |
8867 | if (dev_priv->num_pipe == 1) |
8865 | return true; |
8868 | return true; |
8866 | 8869 | ||
8867 | reg = DSPCNTR(!crtc->plane); |
8870 | reg = DSPCNTR(!crtc->plane); |
8868 | val = I915_READ(reg); |
8871 | val = I915_READ(reg); |
8869 | 8872 | ||
8870 | if ((val & DISPLAY_PLANE_ENABLE) && |
8873 | if ((val & DISPLAY_PLANE_ENABLE) && |
8871 | (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) |
8874 | (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) |
8872 | return false; |
8875 | return false; |
8873 | 8876 | ||
8874 | return true; |
8877 | return true; |
8875 | } |
8878 | } |
8876 | 8879 | ||
8877 | static void intel_sanitize_crtc(struct intel_crtc *crtc) |
8880 | static void intel_sanitize_crtc(struct intel_crtc *crtc) |
8878 | { |
8881 | { |
8879 | struct drm_device *dev = crtc->base.dev; |
8882 | struct drm_device *dev = crtc->base.dev; |
8880 | struct drm_i915_private *dev_priv = dev->dev_private; |
8883 | struct drm_i915_private *dev_priv = dev->dev_private; |
8881 | u32 reg; |
8884 | u32 reg; |
8882 | 8885 | ||
8883 | /* Clear any frame start delays used for debugging left by the BIOS */ |
8886 | /* Clear any frame start delays used for debugging left by the BIOS */ |
8884 | reg = PIPECONF(crtc->cpu_transcoder); |
8887 | reg = PIPECONF(crtc->cpu_transcoder); |
8885 | I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); |
8888 | I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); |
8886 | 8889 | ||
8887 | /* We need to sanitize the plane -> pipe mapping first because this will |
8890 | /* We need to sanitize the plane -> pipe mapping first because this will |
8888 | * disable the crtc (and hence change the state) if it is wrong. Note |
8891 | * disable the crtc (and hence change the state) if it is wrong. Note |
8889 | * that gen4+ has a fixed plane -> pipe mapping. */ |
8892 | * that gen4+ has a fixed plane -> pipe mapping. */ |
8890 | if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { |
8893 | if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { |
8891 | struct intel_connector *connector; |
8894 | struct intel_connector *connector; |
8892 | bool plane; |
8895 | bool plane; |
8893 | 8896 | ||
8894 | DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", |
8897 | DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", |
8895 | crtc->base.base.id); |
8898 | crtc->base.base.id); |
8896 | 8899 | ||
8897 | /* Pipe has the wrong plane attached and the plane is active. |
8900 | /* Pipe has the wrong plane attached and the plane is active. |
8898 | * Temporarily change the plane mapping and disable everything |
8901 | * Temporarily change the plane mapping and disable everything |
8899 | * ... */ |
8902 | * ... */ |
8900 | plane = crtc->plane; |
8903 | plane = crtc->plane; |
8901 | crtc->plane = !plane; |
8904 | crtc->plane = !plane; |
8902 | dev_priv->display.crtc_disable(&crtc->base); |
8905 | dev_priv->display.crtc_disable(&crtc->base); |
8903 | crtc->plane = plane; |
8906 | crtc->plane = plane; |
8904 | 8907 | ||
8905 | /* ... and break all links. */ |
8908 | /* ... and break all links. */ |
8906 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
8909 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
8907 | base.head) { |
8910 | base.head) { |
8908 | if (connector->encoder->base.crtc != &crtc->base) |
8911 | if (connector->encoder->base.crtc != &crtc->base) |
8909 | continue; |
8912 | continue; |
8910 | 8913 | ||
8911 | intel_connector_break_all_links(connector); |
8914 | intel_connector_break_all_links(connector); |
8912 | } |
8915 | } |
8913 | 8916 | ||
8914 | WARN_ON(crtc->active); |
8917 | WARN_ON(crtc->active); |
8915 | crtc->base.enabled = false; |
8918 | crtc->base.enabled = false; |
8916 | } |
8919 | } |
8917 | 8920 | ||
8918 | if (dev_priv->quirks & QUIRK_PIPEA_FORCE && |
8921 | if (dev_priv->quirks & QUIRK_PIPEA_FORCE && |
8919 | crtc->pipe == PIPE_A && !crtc->active) { |
8922 | crtc->pipe == PIPE_A && !crtc->active) { |
8920 | /* BIOS forgot to enable pipe A, this mostly happens after |
8923 | /* BIOS forgot to enable pipe A, this mostly happens after |
8921 | * resume. Force-enable the pipe to fix this, the update_dpms |
8924 | * resume. Force-enable the pipe to fix this, the update_dpms |
8922 | * call below we restore the pipe to the right state, but leave |
8925 | * call below we restore the pipe to the right state, but leave |
8923 | * the required bits on. */ |
8926 | * the required bits on. */ |
8924 | intel_enable_pipe_a(dev); |
8927 | intel_enable_pipe_a(dev); |
8925 | } |
8928 | } |
8926 | 8929 | ||
8927 | /* Adjust the state of the output pipe according to whether we |
8930 | /* Adjust the state of the output pipe according to whether we |
8928 | * have active connectors/encoders. */ |
8931 | * have active connectors/encoders. */ |
8929 | intel_crtc_update_dpms(&crtc->base); |
8932 | intel_crtc_update_dpms(&crtc->base); |
8930 | 8933 | ||
8931 | if (crtc->active != crtc->base.enabled) { |
8934 | if (crtc->active != crtc->base.enabled) { |
8932 | struct intel_encoder *encoder; |
8935 | struct intel_encoder *encoder; |
8933 | 8936 | ||
8934 | /* This can happen either due to bugs in the get_hw_state |
8937 | /* This can happen either due to bugs in the get_hw_state |
8935 | * functions or because the pipe is force-enabled due to the |
8938 | * functions or because the pipe is force-enabled due to the |
8936 | * pipe A quirk. */ |
8939 | * pipe A quirk. */ |
8937 | DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", |
8940 | DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", |
8938 | crtc->base.base.id, |
8941 | crtc->base.base.id, |
8939 | crtc->base.enabled ? "enabled" : "disabled", |
8942 | crtc->base.enabled ? "enabled" : "disabled", |
8940 | crtc->active ? "enabled" : "disabled"); |
8943 | crtc->active ? "enabled" : "disabled"); |
8941 | 8944 | ||
8942 | crtc->base.enabled = crtc->active; |
8945 | crtc->base.enabled = crtc->active; |
8943 | 8946 | ||
8944 | /* Because we only establish the connector -> encoder -> |
8947 | /* Because we only establish the connector -> encoder -> |
8945 | * crtc links if something is active, this means the |
8948 | * crtc links if something is active, this means the |
8946 | * crtc is now deactivated. Break the links. connector |
8949 | * crtc is now deactivated. Break the links. connector |
8947 | * -> encoder links are only establish when things are |
8950 | * -> encoder links are only establish when things are |
8948 | * actually up, hence no need to break them. */ |
8951 | * actually up, hence no need to break them. */ |
8949 | WARN_ON(crtc->active); |
8952 | WARN_ON(crtc->active); |
8950 | 8953 | ||
8951 | for_each_encoder_on_crtc(dev, &crtc->base, encoder) { |
8954 | for_each_encoder_on_crtc(dev, &crtc->base, encoder) { |
8952 | WARN_ON(encoder->connectors_active); |
8955 | WARN_ON(encoder->connectors_active); |
8953 | encoder->base.crtc = NULL; |
8956 | encoder->base.crtc = NULL; |
8954 | } |
8957 | } |
8955 | } |
8958 | } |
8956 | } |
8959 | } |
8957 | 8960 | ||
8958 | static void intel_sanitize_encoder(struct intel_encoder *encoder) |
8961 | static void intel_sanitize_encoder(struct intel_encoder *encoder) |
8959 | { |
8962 | { |
8960 | struct intel_connector *connector; |
8963 | struct intel_connector *connector; |
8961 | struct drm_device *dev = encoder->base.dev; |
8964 | struct drm_device *dev = encoder->base.dev; |
8962 | 8965 | ||
8963 | /* We need to check both for a crtc link (meaning that the |
8966 | /* We need to check both for a crtc link (meaning that the |
8964 | * encoder is active and trying to read from a pipe) and the |
8967 | * encoder is active and trying to read from a pipe) and the |
8965 | * pipe itself being active. */ |
8968 | * pipe itself being active. */ |
8966 | bool has_active_crtc = encoder->base.crtc && |
8969 | bool has_active_crtc = encoder->base.crtc && |
8967 | to_intel_crtc(encoder->base.crtc)->active; |
8970 | to_intel_crtc(encoder->base.crtc)->active; |
8968 | 8971 | ||
8969 | if (encoder->connectors_active && !has_active_crtc) { |
8972 | if (encoder->connectors_active && !has_active_crtc) { |
8970 | DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", |
8973 | DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", |
8971 | encoder->base.base.id, |
8974 | encoder->base.base.id, |
8972 | drm_get_encoder_name(&encoder->base)); |
8975 | drm_get_encoder_name(&encoder->base)); |
8973 | 8976 | ||
8974 | /* Connector is active, but has no active pipe. This is |
8977 | /* Connector is active, but has no active pipe. This is |
8975 | * fallout from our resume register restoring. Disable |
8978 | * fallout from our resume register restoring. Disable |
8976 | * the encoder manually again. */ |
8979 | * the encoder manually again. */ |
8977 | if (encoder->base.crtc) { |
8980 | if (encoder->base.crtc) { |
8978 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", |
8981 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", |
8979 | encoder->base.base.id, |
8982 | encoder->base.base.id, |
8980 | drm_get_encoder_name(&encoder->base)); |
8983 | drm_get_encoder_name(&encoder->base)); |
8981 | encoder->disable(encoder); |
8984 | encoder->disable(encoder); |
8982 | } |
8985 | } |
8983 | 8986 | ||
8984 | /* Inconsistent output/port/pipe state happens presumably due to |
8987 | /* Inconsistent output/port/pipe state happens presumably due to |
8985 | * a bug in one of the get_hw_state functions. Or someplace else |
8988 | * a bug in one of the get_hw_state functions. Or someplace else |
8986 | * in our code, like the register restore mess on resume. Clamp |
8989 | * in our code, like the register restore mess on resume. Clamp |
8987 | * things to off as a safer default. */ |
8990 | * things to off as a safer default. */ |
8988 | list_for_each_entry(connector, |
8991 | list_for_each_entry(connector, |
8989 | &dev->mode_config.connector_list, |
8992 | &dev->mode_config.connector_list, |
8990 | base.head) { |
8993 | base.head) { |
8991 | if (connector->encoder != encoder) |
8994 | if (connector->encoder != encoder) |
8992 | continue; |
8995 | continue; |
8993 | 8996 | ||
8994 | intel_connector_break_all_links(connector); |
8997 | intel_connector_break_all_links(connector); |
8995 | } |
8998 | } |
8996 | } |
8999 | } |
8997 | /* Enabled encoders without active connectors will be fixed in |
9000 | /* Enabled encoders without active connectors will be fixed in |
8998 | * the crtc fixup. */ |
9001 | * the crtc fixup. */ |
8999 | } |
9002 | } |
9000 | 9003 | ||
9001 | /* Scan out the current hw modeset state, sanitizes it and maps it into the drm |
9004 | /* Scan out the current hw modeset state, sanitizes it and maps it into the drm |
9002 | * and i915 state tracking structures. */ |
9005 | * and i915 state tracking structures. */ |
9003 | void intel_modeset_setup_hw_state(struct drm_device *dev, |
9006 | void intel_modeset_setup_hw_state(struct drm_device *dev, |
9004 | bool force_restore) |
9007 | bool force_restore) |
9005 | { |
9008 | { |
9006 | struct drm_i915_private *dev_priv = dev->dev_private; |
9009 | struct drm_i915_private *dev_priv = dev->dev_private; |
9007 | enum pipe pipe; |
9010 | enum pipe pipe; |
9008 | u32 tmp; |
9011 | u32 tmp; |
9009 | struct intel_crtc *crtc; |
9012 | struct intel_crtc *crtc; |
9010 | struct intel_encoder *encoder; |
9013 | struct intel_encoder *encoder; |
9011 | struct intel_connector *connector; |
9014 | struct intel_connector *connector; |
9012 | 9015 | ||
9013 | if (HAS_DDI(dev)) { |
9016 | if (HAS_DDI(dev)) { |
9014 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); |
9017 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); |
9015 | 9018 | ||
9016 | if (tmp & TRANS_DDI_FUNC_ENABLE) { |
9019 | if (tmp & TRANS_DDI_FUNC_ENABLE) { |
9017 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { |
9020 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { |
9018 | case TRANS_DDI_EDP_INPUT_A_ON: |
9021 | case TRANS_DDI_EDP_INPUT_A_ON: |
9019 | case TRANS_DDI_EDP_INPUT_A_ONOFF: |
9022 | case TRANS_DDI_EDP_INPUT_A_ONOFF: |
9020 | pipe = PIPE_A; |
9023 | pipe = PIPE_A; |
9021 | break; |
9024 | break; |
9022 | case TRANS_DDI_EDP_INPUT_B_ONOFF: |
9025 | case TRANS_DDI_EDP_INPUT_B_ONOFF: |
9023 | pipe = PIPE_B; |
9026 | pipe = PIPE_B; |
9024 | break; |
9027 | break; |
9025 | case TRANS_DDI_EDP_INPUT_C_ONOFF: |
9028 | case TRANS_DDI_EDP_INPUT_C_ONOFF: |
9026 | pipe = PIPE_C; |
9029 | pipe = PIPE_C; |
9027 | break; |
9030 | break; |
9028 | } |
9031 | } |
9029 | 9032 | ||
9030 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
9033 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
9031 | crtc->cpu_transcoder = TRANSCODER_EDP; |
9034 | crtc->cpu_transcoder = TRANSCODER_EDP; |
9032 | 9035 | ||
9033 | DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n", |
9036 | DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n", |
9034 | pipe_name(pipe)); |
9037 | pipe_name(pipe)); |
9035 | } |
9038 | } |
9036 | } |
9039 | } |
9037 | 9040 | ||
9038 | for_each_pipe(pipe) { |
9041 | for_each_pipe(pipe) { |
9039 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
9042 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
9040 | 9043 | ||
9041 | tmp = I915_READ(PIPECONF(crtc->cpu_transcoder)); |
9044 | tmp = I915_READ(PIPECONF(crtc->cpu_transcoder)); |
9042 | if (tmp & PIPECONF_ENABLE) |
9045 | if (tmp & PIPECONF_ENABLE) |
9043 | crtc->active = true; |
9046 | crtc->active = true; |
9044 | else |
9047 | else |
9045 | crtc->active = false; |
9048 | crtc->active = false; |
9046 | 9049 | ||
9047 | crtc->base.enabled = crtc->active; |
9050 | crtc->base.enabled = crtc->active; |
9048 | 9051 | ||
9049 | DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", |
9052 | DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", |
9050 | crtc->base.base.id, |
9053 | crtc->base.base.id, |
9051 | crtc->active ? "enabled" : "disabled"); |
9054 | crtc->active ? "enabled" : "disabled"); |
9052 | } |
9055 | } |
9053 | 9056 | ||
9054 | if (HAS_DDI(dev)) |
9057 | if (HAS_DDI(dev)) |
9055 | intel_ddi_setup_hw_pll_state(dev); |
9058 | intel_ddi_setup_hw_pll_state(dev); |
9056 | 9059 | ||
9057 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9060 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9058 | base.head) { |
9061 | base.head) { |
9059 | pipe = 0; |
9062 | pipe = 0; |
9060 | 9063 | ||
9061 | if (encoder->get_hw_state(encoder, &pipe)) { |
9064 | if (encoder->get_hw_state(encoder, &pipe)) { |
9062 | encoder->base.crtc = |
9065 | encoder->base.crtc = |
9063 | dev_priv->pipe_to_crtc_mapping[pipe]; |
9066 | dev_priv->pipe_to_crtc_mapping[pipe]; |
9064 | } else { |
9067 | } else { |
9065 | encoder->base.crtc = NULL; |
9068 | encoder->base.crtc = NULL; |
9066 | } |
9069 | } |
9067 | 9070 | ||
9068 | encoder->connectors_active = false; |
9071 | encoder->connectors_active = false; |
9069 | DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n", |
9072 | DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n", |
9070 | encoder->base.base.id, |
9073 | encoder->base.base.id, |
9071 | drm_get_encoder_name(&encoder->base), |
9074 | drm_get_encoder_name(&encoder->base), |
9072 | encoder->base.crtc ? "enabled" : "disabled", |
9075 | encoder->base.crtc ? "enabled" : "disabled", |
9073 | pipe); |
9076 | pipe); |
9074 | } |
9077 | } |
9075 | 9078 | ||
9076 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
9079 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
9077 | base.head) { |
9080 | base.head) { |
9078 | if (connector->get_hw_state(connector)) { |
9081 | if (connector->get_hw_state(connector)) { |
9079 | connector->base.dpms = DRM_MODE_DPMS_ON; |
9082 | connector->base.dpms = DRM_MODE_DPMS_ON; |
9080 | connector->encoder->connectors_active = true; |
9083 | connector->encoder->connectors_active = true; |
9081 | connector->base.encoder = &connector->encoder->base; |
9084 | connector->base.encoder = &connector->encoder->base; |
9082 | } else { |
9085 | } else { |
9083 | connector->base.dpms = DRM_MODE_DPMS_OFF; |
9086 | connector->base.dpms = DRM_MODE_DPMS_OFF; |
9084 | connector->base.encoder = NULL; |
9087 | connector->base.encoder = NULL; |
9085 | } |
9088 | } |
9086 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", |
9089 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", |
9087 | connector->base.base.id, |
9090 | connector->base.base.id, |
9088 | drm_get_connector_name(&connector->base), |
9091 | drm_get_connector_name(&connector->base), |
9089 | connector->base.encoder ? "enabled" : "disabled"); |
9092 | connector->base.encoder ? "enabled" : "disabled"); |
9090 | } |
9093 | } |
9091 | 9094 | ||
9092 | /* HW state is read out, now we need to sanitize this mess. */ |
9095 | /* HW state is read out, now we need to sanitize this mess. */ |
9093 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9096 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9094 | base.head) { |
9097 | base.head) { |
9095 | intel_sanitize_encoder(encoder); |
9098 | intel_sanitize_encoder(encoder); |
9096 | } |
9099 | } |
9097 | 9100 | ||
9098 | for_each_pipe(pipe) { |
9101 | for_each_pipe(pipe) { |
9099 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
9102 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
9100 | intel_sanitize_crtc(crtc); |
9103 | intel_sanitize_crtc(crtc); |
9101 | } |
9104 | } |
9102 | 9105 | ||
9103 | if (force_restore) { |
9106 | if (force_restore) { |
9104 | for_each_pipe(pipe) { |
9107 | for_each_pipe(pipe) { |
9105 | intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]); |
9108 | intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]); |
9106 | } |
9109 | } |
9107 | 9110 | ||
9108 | // i915_redisable_vga(dev); |
9111 | // i915_redisable_vga(dev); |
9109 | } else { |
9112 | } else { |
9110 | intel_modeset_update_staged_output_state(dev); |
9113 | intel_modeset_update_staged_output_state(dev); |
9111 | } |
9114 | } |
9112 | 9115 | ||
9113 | intel_modeset_check_state(dev); |
9116 | intel_modeset_check_state(dev); |
9114 | 9117 | ||
9115 | drm_mode_config_reset(dev); |
9118 | drm_mode_config_reset(dev); |
9116 | } |
9119 | } |
9117 | 9120 | ||
9118 | void intel_modeset_gem_init(struct drm_device *dev) |
9121 | void intel_modeset_gem_init(struct drm_device *dev) |
9119 | { |
9122 | { |
9120 | intel_modeset_init_hw(dev); |
9123 | intel_modeset_init_hw(dev); |
9121 | 9124 | ||
9122 | // intel_setup_overlay(dev); |
9125 | // intel_setup_overlay(dev); |
9123 | 9126 | ||
9124 | intel_modeset_setup_hw_state(dev, false); |
9127 | intel_modeset_setup_hw_state(dev, false); |
9125 | } |
9128 | } |
9126 | 9129 | ||
9127 | void intel_modeset_cleanup(struct drm_device *dev) |
9130 | void intel_modeset_cleanup(struct drm_device *dev) |
9128 | { |
9131 | { |
9129 | #if 0 |
9132 | #if 0 |
9130 | struct drm_i915_private *dev_priv = dev->dev_private; |
9133 | struct drm_i915_private *dev_priv = dev->dev_private; |
9131 | struct drm_crtc *crtc; |
9134 | struct drm_crtc *crtc; |
9132 | struct intel_crtc *intel_crtc; |
9135 | struct intel_crtc *intel_crtc; |
9133 | 9136 | ||
9134 | // drm_kms_helper_poll_fini(dev); |
9137 | // drm_kms_helper_poll_fini(dev); |
9135 | mutex_lock(&dev->struct_mutex); |
9138 | mutex_lock(&dev->struct_mutex); |
9136 | 9139 | ||
9137 | // intel_unregister_dsm_handler(); |
9140 | // intel_unregister_dsm_handler(); |
9138 | 9141 | ||
9139 | 9142 | ||
9140 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
9143 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
9141 | /* Skip inactive CRTCs */ |
9144 | /* Skip inactive CRTCs */ |
9142 | if (!crtc->fb) |
9145 | if (!crtc->fb) |
9143 | continue; |
9146 | continue; |
9144 | 9147 | ||
9145 | intel_crtc = to_intel_crtc(crtc); |
9148 | intel_crtc = to_intel_crtc(crtc); |
9146 | intel_increase_pllclock(crtc); |
9149 | intel_increase_pllclock(crtc); |
9147 | } |
9150 | } |
9148 | 9151 | ||
9149 | intel_disable_fbc(dev); |
9152 | intel_disable_fbc(dev); |
9150 | 9153 | ||
9151 | intel_disable_gt_powersave(dev); |
9154 | intel_disable_gt_powersave(dev); |
9152 | 9155 | ||
9153 | ironlake_teardown_rc6(dev); |
9156 | ironlake_teardown_rc6(dev); |
9154 | 9157 | ||
9155 | if (IS_VALLEYVIEW(dev)) |
9158 | if (IS_VALLEYVIEW(dev)) |
9156 | vlv_init_dpio(dev); |
9159 | vlv_init_dpio(dev); |
9157 | 9160 | ||
9158 | mutex_unlock(&dev->struct_mutex); |
9161 | mutex_unlock(&dev->struct_mutex); |
9159 | 9162 | ||
9160 | /* Disable the irq before mode object teardown, for the irq might |
9163 | /* Disable the irq before mode object teardown, for the irq might |
9161 | * enqueue unpin/hotplug work. */ |
9164 | * enqueue unpin/hotplug work. */ |
9162 | // drm_irq_uninstall(dev); |
9165 | // drm_irq_uninstall(dev); |
9163 | // cancel_work_sync(&dev_priv->hotplug_work); |
9166 | // cancel_work_sync(&dev_priv->hotplug_work); |
9164 | // cancel_work_sync(&dev_priv->rps.work); |
9167 | // cancel_work_sync(&dev_priv->rps.work); |
9165 | 9168 | ||
9166 | /* flush any delayed tasks or pending work */ |
9169 | /* flush any delayed tasks or pending work */ |
9167 | // flush_scheduled_work(); |
9170 | // flush_scheduled_work(); |
9168 | 9171 | ||
9169 | drm_mode_config_cleanup(dev); |
9172 | drm_mode_config_cleanup(dev); |
9170 | #endif |
9173 | #endif |
9171 | } |
9174 | } |
9172 | 9175 | ||
9173 | /* |
9176 | /* |
9174 | * Return which encoder is currently attached for connector. |
9177 | * Return which encoder is currently attached for connector. |
9175 | */ |
9178 | */ |
9176 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) |
9179 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) |
9177 | { |
9180 | { |
9178 | return &intel_attached_encoder(connector)->base; |
9181 | return &intel_attached_encoder(connector)->base; |
9179 | } |
9182 | } |
9180 | 9183 | ||
9181 | void intel_connector_attach_encoder(struct intel_connector *connector, |
9184 | void intel_connector_attach_encoder(struct intel_connector *connector, |
9182 | struct intel_encoder *encoder) |
9185 | struct intel_encoder *encoder) |
9183 | { |
9186 | { |
9184 | connector->encoder = encoder; |
9187 | connector->encoder = encoder; |
9185 | drm_mode_connector_attach_encoder(&connector->base, |
9188 | drm_mode_connector_attach_encoder(&connector->base, |
9186 | &encoder->base); |
9189 | &encoder->base); |
9187 | } |
9190 | } |
9188 | 9191 | ||
9189 | /* |
9192 | /* |
9190 | * set vga decode state - true == enable VGA decode |
9193 | * set vga decode state - true == enable VGA decode |
9191 | */ |
9194 | */ |
9192 | int intel_modeset_vga_set_state(struct drm_device *dev, bool state) |
9195 | int intel_modeset_vga_set_state(struct drm_device *dev, bool state) |
9193 | { |
9196 | { |
9194 | struct drm_i915_private *dev_priv = dev->dev_private; |
9197 | struct drm_i915_private *dev_priv = dev->dev_private; |
9195 | u16 gmch_ctrl; |
9198 | u16 gmch_ctrl; |
9196 | 9199 | ||
9197 | pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); |
9200 | pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); |
9198 | if (state) |
9201 | if (state) |
9199 | gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; |
9202 | gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; |
9200 | else |
9203 | else |
9201 | gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; |
9204 | gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; |
9202 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); |
9205 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); |
9203 | return 0; |
9206 | return 0; |
9204 | } |
9207 | } |
9205 | 9208 | ||
9206 | #ifdef CONFIG_DEBUG_FS |
9209 | #ifdef CONFIG_DEBUG_FS |
9207 | #include |
9210 | #include |
9208 | 9211 | ||
9209 | struct intel_display_error_state { |
9212 | struct intel_display_error_state { |
9210 | struct intel_cursor_error_state { |
9213 | struct intel_cursor_error_state { |
9211 | u32 control; |
9214 | u32 control; |
9212 | u32 position; |
9215 | u32 position; |
9213 | u32 base; |
9216 | u32 base; |
9214 | u32 size; |
9217 | u32 size; |
9215 | } cursor[I915_MAX_PIPES]; |
9218 | } cursor[I915_MAX_PIPES]; |
9216 | 9219 | ||
9217 | struct intel_pipe_error_state { |
9220 | struct intel_pipe_error_state { |
9218 | u32 conf; |
9221 | u32 conf; |
9219 | u32 source; |
9222 | u32 source; |
9220 | 9223 | ||
9221 | u32 htotal; |
9224 | u32 htotal; |
9222 | u32 hblank; |
9225 | u32 hblank; |
9223 | u32 hsync; |
9226 | u32 hsync; |
9224 | u32 vtotal; |
9227 | u32 vtotal; |
9225 | u32 vblank; |
9228 | u32 vblank; |
9226 | u32 vsync; |
9229 | u32 vsync; |
9227 | } pipe[I915_MAX_PIPES]; |
9230 | } pipe[I915_MAX_PIPES]; |
9228 | 9231 | ||
9229 | struct intel_plane_error_state { |
9232 | struct intel_plane_error_state { |
9230 | u32 control; |
9233 | u32 control; |
9231 | u32 stride; |
9234 | u32 stride; |
9232 | u32 size; |
9235 | u32 size; |
9233 | u32 pos; |
9236 | u32 pos; |
9234 | u32 addr; |
9237 | u32 addr; |
9235 | u32 surface; |
9238 | u32 surface; |
9236 | u32 tile_offset; |
9239 | u32 tile_offset; |
9237 | } plane[I915_MAX_PIPES]; |
9240 | } plane[I915_MAX_PIPES]; |
9238 | }; |
9241 | }; |
9239 | 9242 | ||
9240 | struct intel_display_error_state * |
9243 | struct intel_display_error_state * |
9241 | intel_display_capture_error_state(struct drm_device *dev) |
9244 | intel_display_capture_error_state(struct drm_device *dev) |
9242 | { |
9245 | { |
9243 | drm_i915_private_t *dev_priv = dev->dev_private; |
9246 | drm_i915_private_t *dev_priv = dev->dev_private; |
9244 | struct intel_display_error_state *error; |
9247 | struct intel_display_error_state *error; |
9245 | enum transcoder cpu_transcoder; |
9248 | enum transcoder cpu_transcoder; |
9246 | int i; |
9249 | int i; |
9247 | 9250 | ||
9248 | error = kmalloc(sizeof(*error), GFP_ATOMIC); |
9251 | error = kmalloc(sizeof(*error), GFP_ATOMIC); |
9249 | if (error == NULL) |
9252 | if (error == NULL) |
9250 | return NULL; |
9253 | return NULL; |
9251 | 9254 | ||
9252 | for_each_pipe(i) { |
9255 | for_each_pipe(i) { |
9253 | cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); |
9256 | cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); |
9254 | 9257 | ||
9255 | error->cursor[i].control = I915_READ(CURCNTR(i)); |
9258 | error->cursor[i].control = I915_READ(CURCNTR(i)); |
9256 | error->cursor[i].position = I915_READ(CURPOS(i)); |
9259 | error->cursor[i].position = I915_READ(CURPOS(i)); |
9257 | error->cursor[i].base = I915_READ(CURBASE(i)); |
9260 | error->cursor[i].base = I915_READ(CURBASE(i)); |
9258 | 9261 | ||
9259 | error->plane[i].control = I915_READ(DSPCNTR(i)); |
9262 | error->plane[i].control = I915_READ(DSPCNTR(i)); |
9260 | error->plane[i].stride = I915_READ(DSPSTRIDE(i)); |
9263 | error->plane[i].stride = I915_READ(DSPSTRIDE(i)); |
9261 | error->plane[i].size = I915_READ(DSPSIZE(i)); |
9264 | error->plane[i].size = I915_READ(DSPSIZE(i)); |
9262 | error->plane[i].pos = I915_READ(DSPPOS(i)); |
9265 | error->plane[i].pos = I915_READ(DSPPOS(i)); |
9263 | error->plane[i].addr = I915_READ(DSPADDR(i)); |
9266 | error->plane[i].addr = I915_READ(DSPADDR(i)); |
9264 | if (INTEL_INFO(dev)->gen >= 4) { |
9267 | if (INTEL_INFO(dev)->gen >= 4) { |
9265 | error->plane[i].surface = I915_READ(DSPSURF(i)); |
9268 | error->plane[i].surface = I915_READ(DSPSURF(i)); |
9266 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); |
9269 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); |
9267 | } |
9270 | } |
9268 | 9271 | ||
9269 | error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); |
9272 | error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder)); |
9270 | error->pipe[i].source = I915_READ(PIPESRC(i)); |
9273 | error->pipe[i].source = I915_READ(PIPESRC(i)); |
9271 | error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); |
9274 | error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); |
9272 | error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); |
9275 | error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder)); |
9273 | error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); |
9276 | error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder)); |
9274 | error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); |
9277 | error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); |
9275 | error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); |
9278 | error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder)); |
9276 | error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); |
9279 | error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder)); |
9277 | } |
9280 | } |
9278 | 9281 | ||
9279 | return error; |
9282 | return error; |
9280 | } |
9283 | } |
9281 | 9284 | ||
9282 | void |
9285 | void |
9283 | intel_display_print_error_state(struct seq_file *m, |
9286 | intel_display_print_error_state(struct seq_file *m, |
9284 | struct drm_device *dev, |
9287 | struct drm_device *dev, |
9285 | struct intel_display_error_state *error) |
9288 | struct intel_display_error_state *error) |
9286 | { |
9289 | { |
9287 | drm_i915_private_t *dev_priv = dev->dev_private; |
9290 | drm_i915_private_t *dev_priv = dev->dev_private; |
9288 | int i; |
9291 | int i; |
9289 | 9292 | ||
9290 | seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe); |
9293 | seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe); |
9291 | for_each_pipe(i) { |
9294 | for_each_pipe(i) { |
9292 | seq_printf(m, "Pipe [%d]:\n", i); |
9295 | seq_printf(m, "Pipe [%d]:\n", i); |
9293 | seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); |
9296 | seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); |
9294 | seq_printf(m, " SRC: %08x\n", error->pipe[i].source); |
9297 | seq_printf(m, " SRC: %08x\n", error->pipe[i].source); |
9295 | seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); |
9298 | seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); |
9296 | seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); |
9299 | seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); |
9297 | seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); |
9300 | seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); |
9298 | seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); |
9301 | seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); |
9299 | seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); |
9302 | seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); |
9300 | seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); |
9303 | seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); |
9301 | 9304 | ||
9302 | seq_printf(m, "Plane [%d]:\n", i); |
9305 | seq_printf(m, "Plane [%d]:\n", i); |
9303 | seq_printf(m, " CNTR: %08x\n", error->plane[i].control); |
9306 | seq_printf(m, " CNTR: %08x\n", error->plane[i].control); |
9304 | seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); |
9307 | seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); |
9305 | seq_printf(m, " SIZE: %08x\n", error->plane[i].size); |
9308 | seq_printf(m, " SIZE: %08x\n", error->plane[i].size); |
9306 | seq_printf(m, " POS: %08x\n", error->plane[i].pos); |
9309 | seq_printf(m, " POS: %08x\n", error->plane[i].pos); |
9307 | seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); |
9310 | seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); |
9308 | if (INTEL_INFO(dev)->gen >= 4) { |
9311 | if (INTEL_INFO(dev)->gen >= 4) { |
9309 | seq_printf(m, " SURF: %08x\n", error->plane[i].surface); |
9312 | seq_printf(m, " SURF: %08x\n", error->plane[i].surface); |
9310 | seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); |
9313 | seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); |
9311 | } |
9314 | } |
9312 | 9315 | ||
9313 | seq_printf(m, "Cursor [%d]:\n", i); |
9316 | seq_printf(m, "Cursor [%d]:\n", i); |
9314 | seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); |
9317 | seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); |
9315 | seq_printf(m, " POS: %08x\n", error->cursor[i].position); |
9318 | seq_printf(m, " POS: %08x\n", error->cursor[i].position); |
9316 | seq_printf(m, " BASE: %08x\n", error->cursor[i].base); |
9319 | seq_printf(m, " BASE: %08x\n", error->cursor[i].base); |
9317 | } |
9320 | } |
9318 | } |
9321 | } |
9319 | #endif>>5,><5,>>>>>=>><>><>>>>>(intel_crtc)-><(intel_crtc)->><>><>><>><>><>><>><>><>=><=>>><>>>>>><>><>><>><>>><>><>>>><>><>>>><>><>><>><>>>><>><>><>><>>><>><>><>><>>>><>><>><>><>>><>><>><>><>><>>=>><>><>><>><>->>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>>><>><>><>><>><>><>><>>><>><>><>><>>><>><>><>><>><>><>><>><>><>><>><>><>><>><>20)><20)>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>=>>>>>>>>>><>><>><>><>><>><>><>><>><>>>><>><>>>>>>>><>><>>>><>><>><>><>><>>>>>><>><>><>>>>>=>=>>>>=>>>=>=>=>=>>>>>>>>>>=>=>>>>>>>>> |
9322 | #endif>>5,><5,>>>>>=>><>><>>>>>(intel_crtc)-><(intel_crtc)->><>><>><>><>><>><>><>><>=><=>>><>>>>>><>><>><>><>>><>><>>>><>><>>>><>><>><>><>>>><>><>><>><>>><>><>><>><>>>><>><>><>><>>><>><>><>><>><>>=>><>><>><>><>->>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>>><>><>><>><>><>><>><>>><>><>><>><>>><>><>><>><>><>><>><>><>><>><>><>><>><>><>20)><20)>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>=>>>>>>>>>><>><>><>><>><>><>><>><>><>>>><>><>>>>>>>><>><>>>><>><>><>><>><>>>>>><>><>><>>>>>=>=>>>>=>>>=>=>=>=>>>>>>>>>>=>=>>>>>>>>> |