Rev 2351 | Rev 3031 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2351 | Rev 2360 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright © 2006-2007 Intel Corporation |
2 | * Copyright © 2006-2007 Intel Corporation |
3 | * |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
10 | * |
11 | * The above copyright notice and this permission notice (including the next |
11 | * The above copyright notice and this permission notice (including the next |
12 | * paragraph) shall be included in all copies or substantial portions of the |
12 | * paragraph) shall be included in all copies or substantial portions of the |
13 | * Software. |
13 | * Software. |
14 | * |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
21 | * DEALINGS IN THE SOFTWARE. |
21 | * DEALINGS IN THE SOFTWARE. |
22 | * |
22 | * |
23 | * Authors: |
23 | * Authors: |
24 | * Eric Anholt |
24 | * Eric Anholt |
25 | */ |
25 | */ |
26 | 26 | ||
27 | //#include |
27 | //#include |
28 | #include |
28 | #include |
29 | //#include |
29 | //#include |
30 | #include |
30 | #include |
31 | #include |
31 | #include |
32 | #include |
32 | #include |
33 | //#include |
33 | //#include |
34 | #include |
34 | #include |
35 | #include "drmP.h" |
35 | #include "drmP.h" |
36 | #include "intel_drv.h" |
36 | #include "intel_drv.h" |
37 | #include "i915_drm.h" |
37 | #include "i915_drm.h" |
38 | #include "i915_drv.h" |
38 | #include "i915_drv.h" |
39 | #include "i915_trace.h" |
39 | #include "i915_trace.h" |
40 | #include "drm_dp_helper.h" |
40 | #include "drm_dp_helper.h" |
41 | #include "drm_crtc_helper.h" |
41 | #include "drm_crtc_helper.h" |
42 | 42 | ||
/* NOTE(review): provided by the KolibriOS platform layer; presumably the
 * physical base address used for bus/PCI accesses -- confirm at the
 * definition site. */
phys_addr_t get_bus_addr(void);
44 | 44 | ||
/*
 * Report whether @n is a positive power of two.
 *
 * Zero is rejected up front; for any n > 0, a power of two has exactly
 * one bit set, so clearing the lowest set bit (n & (n - 1)) leaves 0.
 */
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
	if (n == 0)
		return false;

	return (n & (n - 1)) == 0;
}
50 | 50 | ||
/* Largest errno value; the top MAX_ERRNO addresses of the pointer space
 * are reserved for encoded error codes (Linux ERR_PTR scheme). */
#define MAX_ERRNO 4095

/* True when x, viewed as unsigned long, falls in the error range
 * [-MAX_ERRNO, -1].  unlikely() is a branch-prediction hint supplied by
 * the compat headers. */
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

/* Return non-zero when ptr encodes a negative errno rather than a valid
 * pointer (counterpart of ERR_PTR below). */
static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
59 | 59 | ||
/* Encode a negative errno value as a pointer; decode/test with IS_ERR. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}
64 | 64 | ||
65 | 65 | ||
66 | static inline int pci_read_config_word(struct pci_dev *dev, int where, |
66 | static inline int pci_read_config_word(struct pci_dev *dev, int where, |
67 | u16 *val) |
67 | u16 *val) |
68 | { |
68 | { |
69 | *val = PciRead16(dev->busnr, dev->devfn, where); |
69 | *val = PciRead16(dev->busnr, dev->devfn, where); |
70 | return 1; |
70 | return 1; |
71 | } |
71 | } |
72 | 72 | ||
73 | 73 | ||
/* True when the CRTC named 'crtc' in the surrounding scope drives an eDP
 * output -- the macro deliberately captures that local variable. */
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

/* Forward declarations for helpers defined later in this file. */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
80 | 80 | ||
/* One candidate PLL configuration: the raw divider inputs plus the clock
 * values computed from them by the clock-calculation helpers. */
typedef struct {
	/* given values */
	int n;		/* reference divider */
	int m1, m2;	/* feedback dividers */
	int p1, p2;	/* post dividers */
	/* derived values */
	int dot;	/* resulting dot clock (kHz, per the limit tables) */
	int vco;	/* resulting VCO frequency */
	int m;		/* combined feedback divider */
	int p;		/* combined post divider */
} intel_clock_t;
92 | 92 | ||
/* Inclusive [min, max] bound for one PLL parameter. */
typedef struct {
	int min, max;
} intel_range_t;
96 | 96 | ||
/* p2 post-divider choice: presumably p2_slow applies at or below
 * dot_limit and p2_fast above it -- confirm against the find_pll
 * implementations later in this file. */
typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;
101 | 101 | ||
#define INTEL_P2_NUM		      2
typedef struct intel_limit intel_limit_t;

/* Per-platform/per-output PLL constraints plus the search routine that
 * picks divider values satisfying them for a target clock. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			  int, int, intel_clock_t *);
};
110 | 110 | ||
/* FDI */
#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */

/* PLL search strategies, selected through intel_limit.find_pll: scan the
 * divider space allowed by 'limit' for 'target' kHz at the given refclk
 * and fill *best_clock.  The _dp variants use fixed tables for
 * DisplayPort link rates. */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *best_clock);
127 | 127 | ||
128 | static inline u32 /* units of 100MHz */ |
128 | static inline u32 /* units of 100MHz */ |
129 | intel_fdi_link_freq(struct drm_device *dev) |
129 | intel_fdi_link_freq(struct drm_device *dev) |
130 | { |
130 | { |
131 | if (IS_GEN5(dev)) { |
131 | if (IS_GEN5(dev)) { |
132 | struct drm_i915_private *dev_priv = dev->dev_private; |
132 | struct drm_i915_private *dev_priv = dev->dev_private; |
133 | return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; |
133 | return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; |
134 | } else |
134 | } else |
135 | return 27; |
135 | return 27; |
136 | } |
136 | } |
137 | 137 | ||
/* 8xx-generation PLL limits, DVO outputs. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};
151 | 151 | ||
/* 8xx-generation PLL limits, LVDS panels (narrower p1, slower p2). */
static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};
165 | 165 | ||
/* 9xx-generation PLL limits, SDVO/DAC outputs. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};
179 | 179 | ||
/* 9xx-generation PLL limits, LVDS panels. */
static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};
193 | 193 | ||
194 | 194 | ||
/* G4x PLL limits, SDVO outputs. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};
210 | 210 | ||
/* G4x PLL limits, HDMI and analog outputs. */
static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};
224 | 224 | ||
/* G4x PLL limits, single-channel LVDS (p2 fixed: dot_limit 0). */
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};
239 | 239 | ||
/* G4x PLL limits, dual-channel LVDS (p2 fixed: dot_limit 0). */
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};
254 | 254 | ||
/* G4x PLL limits, DisplayPort (fixed link rates; table-driven search). */
static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};
268 | 268 | ||
/* Pineview PLL limits, SDVO outputs. */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};
284 | 284 | ||
/* Pineview PLL limits, LVDS panels (m1 unused; combined divider in m2). */
static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};
298 | 298 | ||
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};
317 | 317 | ||
/* Ironlake PLL limits, single-channel LVDS. */
static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};
331 | 331 | ||
/* Ironlake PLL limits, dual-channel LVDS. */
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};
345 | 345 | ||
/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};
360 | 360 | ||
/* Ironlake PLL limits, dual-channel LVDS with 100 MHz refclk. */
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};
374 | 374 | ||
/* Ironlake PLL limits, DisplayPort / eDP (fixed link rates). */
static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};
388 | 388 | ||
/*
 * Select the PLL limit table for a PCH-split (Ironlake/Sandybridge) CRTC.
 *
 * LVDS: channel mode is read live from the PCH_LVDS register (CLKB power
 * bits), then further split on whether the reference clock is 100 MHz.
 * DisplayPort and eDP use the fixed-link-rate table; everything else
 * falls back to the DAC table.
 */
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			/* LVDS single channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}
418 | 418 | ||
/*
 * Select the PLL limit table for a G4x CRTC by output type.
 *
 * LVDS channel mode is read live from the LVDS register (CLKB power
 * bits); HDMI and analog share a table, SDVO and DisplayPort each have
 * their own, and anything else uses the generic i9xx SDVO limits.
 */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}
445 | 445 | ||
446 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) |
446 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) |
447 | { |
447 | { |
448 | struct drm_device *dev = crtc->dev; |
448 | struct drm_device *dev = crtc->dev; |
449 | const intel_limit_t *limit; |
449 | const intel_limit_t *limit; |
450 | 450 | ||
451 | if (HAS_PCH_SPLIT(dev)) |
451 | if (HAS_PCH_SPLIT(dev)) |
452 | limit = intel_ironlake_limit(crtc, refclk); |
452 | limit = intel_ironlake_limit(crtc, refclk); |
453 | else if (IS_G4X(dev)) { |
453 | else if (IS_G4X(dev)) { |
454 | limit = intel_g4x_limit(crtc); |
454 | limit = intel_g4x_limit(crtc); |
455 | } else if (IS_PINEVIEW(dev)) { |
455 | } else if (IS_PINEVIEW(dev)) { |
456 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
456 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
457 | limit = &intel_limits_pineview_lvds; |
457 | limit = &intel_limits_pineview_lvds; |
458 | else |
458 | else |
459 | limit = &intel_limits_pineview_sdvo; |
459 | limit = &intel_limits_pineview_sdvo; |
460 | } else if (!IS_GEN2(dev)) { |
460 | } else if (!IS_GEN2(dev)) { |
461 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
461 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
462 | limit = &intel_limits_i9xx_lvds; |
462 | limit = &intel_limits_i9xx_lvds; |
463 | else |
463 | else |
464 | limit = &intel_limits_i9xx_sdvo; |
464 | limit = &intel_limits_i9xx_sdvo; |
465 | } else { |
465 | } else { |
466 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
466 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
467 | limit = &intel_limits_i8xx_lvds; |
467 | limit = &intel_limits_i8xx_lvds; |
468 | else |
468 | else |
469 | limit = &intel_limits_i8xx_dvo; |
469 | limit = &intel_limits_i8xx_dvo; |
470 | } |
470 | } |
471 | return limit; |
471 | return limit; |
472 | } |
472 | } |
473 | 473 | ||
474 | /* m1 is reserved as 0 in Pineview, n is a ring counter */ |
474 | /* m1 is reserved as 0 in Pineview, n is a ring counter */ |
475 | static void pineview_clock(int refclk, intel_clock_t *clock) |
475 | static void pineview_clock(int refclk, intel_clock_t *clock) |
476 | { |
476 | { |
477 | clock->m = clock->m2 + 2; |
477 | clock->m = clock->m2 + 2; |
478 | clock->p = clock->p1 * clock->p2; |
478 | clock->p = clock->p1 * clock->p2; |
479 | clock->vco = refclk * clock->m / clock->n; |
479 | clock->vco = refclk * clock->m / clock->n; |
480 | clock->dot = clock->vco / clock->p; |
480 | clock->dot = clock->vco / clock->p; |
481 | } |
481 | } |
482 | 482 | ||
483 | static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) |
483 | static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) |
484 | { |
484 | { |
485 | if (IS_PINEVIEW(dev)) { |
485 | if (IS_PINEVIEW(dev)) { |
486 | pineview_clock(refclk, clock); |
486 | pineview_clock(refclk, clock); |
487 | return; |
487 | return; |
488 | } |
488 | } |
489 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); |
489 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); |
490 | clock->p = clock->p1 * clock->p2; |
490 | clock->p = clock->p1 * clock->p2; |
491 | clock->vco = refclk * clock->m / (clock->n + 2); |
491 | clock->vco = refclk * clock->m / (clock->n + 2); |
492 | clock->dot = clock->vco / clock->p; |
492 | clock->dot = clock->vco / clock->p; |
493 | } |
493 | } |
494 | 494 | ||
495 | /** |
495 | /** |
496 | * Returns whether any output on the specified pipe is of the specified type |
496 | * Returns whether any output on the specified pipe is of the specified type |
497 | */ |
497 | */ |
498 | bool intel_pipe_has_type(struct drm_crtc *crtc, int type) |
498 | bool intel_pipe_has_type(struct drm_crtc *crtc, int type) |
499 | { |
499 | { |
500 | struct drm_device *dev = crtc->dev; |
500 | struct drm_device *dev = crtc->dev; |
501 | struct drm_mode_config *mode_config = &dev->mode_config; |
501 | struct drm_mode_config *mode_config = &dev->mode_config; |
502 | struct intel_encoder *encoder; |
502 | struct intel_encoder *encoder; |
503 | 503 | ||
504 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
504 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
505 | if (encoder->base.crtc == crtc && encoder->type == type) |
505 | if (encoder->base.crtc == crtc && encoder->type == type) |
506 | return true; |
506 | return true; |
507 | 507 | ||
508 | return false; |
508 | return false; |
509 | } |
509 | } |
510 | 510 | ||
/* Bail out of the enclosing validity check; the debug print of the
 * rejection reason is compiled out. */
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	/* Every raw and derived divisor must lie inside the
	 * per-platform range from @limit; any miss rejects the clock. */
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	/* m1 is reserved as 0 on Pineview, so m1 > m2 is only required
	 * on other platforms. */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
545 | 545 | ||
/*
 * Brute-force PLL divisor search: walk the full m1/m2/n/p1 ranges from
 * @limit and keep the combination whose computed dot clock is closest
 * to @target.  p2 is fixed up front from the LVDS channel state (or the
 * dot-clock threshold for non-LVDS).  Returns true if any valid
 * combination beat the initial error bound (the target itself);
 * *best_clock holds the winner (zeroed if none found).
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	/* Best error so far; seeded with the worst acceptable value. */
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel. We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* Non-LVDS: pick p2 from the dot-clock threshold. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					/* Derive m/p/vco/dot for this combo
					 * and reject out-of-range results. */
					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* True iff at least one valid combination improved on target. */
	return (err != target);
}
608 | 608 | ||
/*
 * G4x variant of the PLL divisor search.  Unlike intel_find_best_PLL()
 * this accepts any result within an error budget (~0.585% of @target),
 * iterates n upward and m1/m2/p1 downward per hardware preference, and
 * shrinks the n search bound each time a better match is found.
 * Returns true and fills *best_clock if a match within budget exists.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		/* LVDS lives at a different register offset on PCH-split
		 * hardware. */
		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		/* Pick p2 from the panel's current channel state. */
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* Clamp the outer n loop: no
						 * larger n can now win. */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
672 | 672 | ||
673 | static bool |
673 | static bool |
674 | intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
674 | intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
675 | int target, int refclk, intel_clock_t *best_clock) |
675 | int target, int refclk, intel_clock_t *best_clock) |
676 | { |
676 | { |
677 | struct drm_device *dev = crtc->dev; |
677 | struct drm_device *dev = crtc->dev; |
678 | intel_clock_t clock; |
678 | intel_clock_t clock; |
679 | 679 | ||
680 | if (target < 200000) { |
680 | if (target < 200000) { |
681 | clock.n = 1; |
681 | clock.n = 1; |
682 | clock.p1 = 2; |
682 | clock.p1 = 2; |
683 | clock.p2 = 10; |
683 | clock.p2 = 10; |
684 | clock.m1 = 12; |
684 | clock.m1 = 12; |
685 | clock.m2 = 9; |
685 | clock.m2 = 9; |
686 | } else { |
686 | } else { |
687 | clock.n = 2; |
687 | clock.n = 2; |
688 | clock.p1 = 1; |
688 | clock.p1 = 1; |
689 | clock.p2 = 10; |
689 | clock.p2 = 10; |
690 | clock.m1 = 14; |
690 | clock.m1 = 14; |
691 | clock.m2 = 8; |
691 | clock.m2 = 8; |
692 | } |
692 | } |
693 | intel_clock(dev, refclk, &clock); |
693 | intel_clock(dev, refclk, &clock); |
694 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); |
694 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); |
695 | return true; |
695 | return true; |
696 | } |
696 | } |
697 | 697 | ||
698 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ |
698 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ |
699 | static bool |
699 | static bool |
700 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
700 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
701 | int target, int refclk, intel_clock_t *best_clock) |
701 | int target, int refclk, intel_clock_t *best_clock) |
702 | { |
702 | { |
703 | intel_clock_t clock; |
703 | intel_clock_t clock; |
704 | if (target < 200000) { |
704 | if (target < 200000) { |
705 | clock.p1 = 2; |
705 | clock.p1 = 2; |
706 | clock.p2 = 10; |
706 | clock.p2 = 10; |
707 | clock.n = 2; |
707 | clock.n = 2; |
708 | clock.m1 = 23; |
708 | clock.m1 = 23; |
709 | clock.m2 = 8; |
709 | clock.m2 = 8; |
710 | } else { |
710 | } else { |
711 | clock.p1 = 1; |
711 | clock.p1 = 1; |
712 | clock.p2 = 10; |
712 | clock.p2 = 10; |
713 | clock.n = 1; |
713 | clock.n = 1; |
714 | clock.m1 = 14; |
714 | clock.m1 = 14; |
715 | clock.m2 = 2; |
715 | clock.m2 = 2; |
716 | } |
716 | } |
717 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); |
717 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); |
718 | clock.p = (clock.p1 * clock.p2); |
718 | clock.p = (clock.p1 * clock.p2); |
719 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; |
719 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; |
720 | clock.vco = 0; |
720 | clock.vco = 0; |
721 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); |
721 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); |
722 | return true; |
722 | return true; |
723 | } |
723 | } |
724 | 724 | ||
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set, with a 50 ms timeout so a
	 * disabled pipe cannot hang the caller. */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
760 | 760 | ||
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off (100 ms timeout). */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle: re-sample the
		 * current scanline every 5 ms until two consecutive reads
		 * agree or the 100 ms deadline passes. */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			mdelay(5);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
804 | 804 | ||
/* Map an enable flag to a human-readable string for assert messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
809 | 809 | ||
/* Only for pre-ILK configs */
/*
 * WARN if the DPLL for @pipe is not in the expected on/off @state.
 */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	/* VCO-enable bit set means the PLL is running. */
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
827 | 827 | ||
/* For ILK+ */
/*
 * WARN if the PCH PLL feeding @pipe is not in the expected @state.
 * On CPT the transcoder-to-PLL routing is indirect, so PCH_DPLL_SEL is
 * consulted first and @pipe is remapped to the PLL actually selected
 * before its enable bit is sampled.
 */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		WARN(!((pch_dpll >> (4 * pipe)) & 8),
		     "transcoder %d PLL not enabled\n", pipe);

		/* Convert the transcoder pipe number to a pll pipe number */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
858 | 858 | ||
/*
 * WARN if the FDI transmitter on @pipe is not in the expected @state.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_TX_ENABLE);
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
875 | 875 | ||
/*
 * WARN if the FDI receiver on @pipe is not in the expected @state.
 */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
892 | 892 | ||
/*
 * WARN if the FDI TX PLL for @pipe is disabled.  Skipped entirely on
 * gen5, where the FDI PLL has no independent enable to check.
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
907 | 907 | ||
908 | static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, |
908 | static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, |
909 | enum pipe pipe) |
909 | enum pipe pipe) |
910 | { |
910 | { |
911 | int reg; |
911 | int reg; |
912 | u32 val; |
912 | u32 val; |
913 | 913 | ||
914 | reg = FDI_RX_CTL(pipe); |
914 | reg = FDI_RX_CTL(pipe); |
915 | val = I915_READ(reg); |
915 | val = I915_READ(reg); |
916 | WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); |
916 | WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); |
917 | } |
917 | } |
918 | 918 | ||
/*
 * Warn if the panel power sequencer still write-protects the registers
 * for @pipe.  Callers (e.g. intel_enable_pll) must be able to write the
 * PLL register, which the panel logic can lock out.
 */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* PCH-split parts keep the panel power/LVDS registers in the PCH. */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	/* Registers are writable if the panel is off or explicitly unlocked. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	/* Work out which pipe actually drives the LVDS panel. */
	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
947 | 947 | ||
948 | void assert_pipe(struct drm_i915_private *dev_priv, |
948 | void assert_pipe(struct drm_i915_private *dev_priv, |
949 | enum pipe pipe, bool state) |
949 | enum pipe pipe, bool state) |
950 | { |
950 | { |
951 | int reg; |
951 | int reg; |
952 | u32 val; |
952 | u32 val; |
953 | bool cur_state; |
953 | bool cur_state; |
954 | 954 | ||
955 | reg = PIPECONF(pipe); |
955 | reg = PIPECONF(pipe); |
956 | val = I915_READ(reg); |
956 | val = I915_READ(reg); |
957 | cur_state = !!(val & PIPECONF_ENABLE); |
957 | cur_state = !!(val & PIPECONF_ENABLE); |
958 | WARN(cur_state != state, |
958 | WARN(cur_state != state, |
959 | "pipe %c assertion failure (expected %s, current %s)\n", |
959 | "pipe %c assertion failure (expected %s, current %s)\n", |
960 | pipe_name(pipe), state_string(state), state_string(cur_state)); |
960 | pipe_name(pipe), state_string(state), state_string(cur_state)); |
961 | } |
961 | } |
962 | 962 | ||
963 | static void assert_plane_enabled(struct drm_i915_private *dev_priv, |
963 | static void assert_plane_enabled(struct drm_i915_private *dev_priv, |
964 | enum plane plane) |
964 | enum plane plane) |
965 | { |
965 | { |
966 | int reg; |
966 | int reg; |
967 | u32 val; |
967 | u32 val; |
968 | 968 | ||
969 | reg = DSPCNTR(plane); |
969 | reg = DSPCNTR(plane); |
970 | val = I915_READ(reg); |
970 | val = I915_READ(reg); |
971 | WARN(!(val & DISPLAY_PLANE_ENABLE), |
971 | WARN(!(val & DISPLAY_PLANE_ENABLE), |
972 | "plane %c assertion failure, should be active but is disabled\n", |
972 | "plane %c assertion failure, should be active but is disabled\n", |
973 | plane_name(plane)); |
973 | plane_name(plane)); |
974 | } |
974 | } |
975 | 975 | ||
976 | static void assert_planes_disabled(struct drm_i915_private *dev_priv, |
976 | static void assert_planes_disabled(struct drm_i915_private *dev_priv, |
977 | enum pipe pipe) |
977 | enum pipe pipe) |
978 | { |
978 | { |
979 | int reg, i; |
979 | int reg, i; |
980 | u32 val; |
980 | u32 val; |
981 | int cur_pipe; |
981 | int cur_pipe; |
982 | 982 | ||
983 | /* Planes are fixed to pipes on ILK+ */ |
983 | /* Planes are fixed to pipes on ILK+ */ |
984 | if (HAS_PCH_SPLIT(dev_priv->dev)) |
984 | if (HAS_PCH_SPLIT(dev_priv->dev)) |
985 | return; |
985 | return; |
986 | 986 | ||
987 | /* Need to check both planes against the pipe */ |
987 | /* Need to check both planes against the pipe */ |
988 | for (i = 0; i < 2; i++) { |
988 | for (i = 0; i < 2; i++) { |
989 | reg = DSPCNTR(i); |
989 | reg = DSPCNTR(i); |
990 | val = I915_READ(reg); |
990 | val = I915_READ(reg); |
991 | cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> |
991 | cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> |
992 | DISPPLANE_SEL_PIPE_SHIFT; |
992 | DISPPLANE_SEL_PIPE_SHIFT; |
993 | WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, |
993 | WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, |
994 | "plane %c assertion failure, should be off on pipe %c but is still active\n", |
994 | "plane %c assertion failure, should be off on pipe %c but is still active\n", |
995 | plane_name(i), pipe_name(pipe)); |
995 | plane_name(i), pipe_name(pipe)); |
996 | } |
996 | } |
997 | } |
997 | } |
998 | 998 | ||
999 | static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) |
999 | static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) |
1000 | { |
1000 | { |
1001 | u32 val; |
1001 | u32 val; |
1002 | bool enabled; |
1002 | bool enabled; |
1003 | 1003 | ||
1004 | val = I915_READ(PCH_DREF_CONTROL); |
1004 | val = I915_READ(PCH_DREF_CONTROL); |
1005 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | |
1005 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | |
1006 | DREF_SUPERSPREAD_SOURCE_MASK)); |
1006 | DREF_SUPERSPREAD_SOURCE_MASK)); |
1007 | WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); |
1007 | WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); |
1008 | } |
1008 | } |
1009 | 1009 | ||
1010 | static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, |
1010 | static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, |
1011 | enum pipe pipe) |
1011 | enum pipe pipe) |
1012 | { |
1012 | { |
1013 | int reg; |
1013 | int reg; |
1014 | u32 val; |
1014 | u32 val; |
1015 | bool enabled; |
1015 | bool enabled; |
1016 | 1016 | ||
1017 | reg = TRANSCONF(pipe); |
1017 | reg = TRANSCONF(pipe); |
1018 | val = I915_READ(reg); |
1018 | val = I915_READ(reg); |
1019 | enabled = !!(val & TRANS_ENABLE); |
1019 | enabled = !!(val & TRANS_ENABLE); |
1020 | WARN(enabled, |
1020 | WARN(enabled, |
1021 | "transcoder assertion failed, should be off on pipe %c but is still active\n", |
1021 | "transcoder assertion failed, should be off on pipe %c but is still active\n", |
1022 | pipe_name(pipe)); |
1022 | pipe_name(pipe)); |
1023 | } |
1023 | } |
1024 | 1024 | ||
1025 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, |
1025 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, |
1026 | enum pipe pipe, u32 port_sel, u32 val) |
1026 | enum pipe pipe, u32 port_sel, u32 val) |
1027 | { |
1027 | { |
1028 | if ((val & DP_PORT_EN) == 0) |
1028 | if ((val & DP_PORT_EN) == 0) |
1029 | return false; |
1029 | return false; |
1030 | 1030 | ||
1031 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1031 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1032 | u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); |
1032 | u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); |
1033 | u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); |
1033 | u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); |
1034 | if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) |
1034 | if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) |
1035 | return false; |
1035 | return false; |
1036 | } else { |
1036 | } else { |
1037 | if ((val & DP_PIPE_MASK) != (pipe << 30)) |
1037 | if ((val & DP_PIPE_MASK) != (pipe << 30)) |
1038 | return false; |
1038 | return false; |
1039 | } |
1039 | } |
1040 | return true; |
1040 | return true; |
1041 | } |
1041 | } |
1042 | 1042 | ||
1043 | static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, |
1043 | static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, |
1044 | enum pipe pipe, u32 val) |
1044 | enum pipe pipe, u32 val) |
1045 | { |
1045 | { |
1046 | if ((val & PORT_ENABLE) == 0) |
1046 | if ((val & PORT_ENABLE) == 0) |
1047 | return false; |
1047 | return false; |
1048 | 1048 | ||
1049 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1049 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1050 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1050 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1051 | return false; |
1051 | return false; |
1052 | } else { |
1052 | } else { |
1053 | if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) |
1053 | if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) |
1054 | return false; |
1054 | return false; |
1055 | } |
1055 | } |
1056 | return true; |
1056 | return true; |
1057 | } |
1057 | } |
1058 | 1058 | ||
1059 | static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, |
1059 | static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, |
1060 | enum pipe pipe, u32 val) |
1060 | enum pipe pipe, u32 val) |
1061 | { |
1061 | { |
1062 | if ((val & LVDS_PORT_EN) == 0) |
1062 | if ((val & LVDS_PORT_EN) == 0) |
1063 | return false; |
1063 | return false; |
1064 | 1064 | ||
1065 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1065 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1066 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1066 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1067 | return false; |
1067 | return false; |
1068 | } else { |
1068 | } else { |
1069 | if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) |
1069 | if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) |
1070 | return false; |
1070 | return false; |
1071 | } |
1071 | } |
1072 | return true; |
1072 | return true; |
1073 | } |
1073 | } |
1074 | 1074 | ||
1075 | static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, |
1075 | static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, |
1076 | enum pipe pipe, u32 val) |
1076 | enum pipe pipe, u32 val) |
1077 | { |
1077 | { |
1078 | if ((val & ADPA_DAC_ENABLE) == 0) |
1078 | if ((val & ADPA_DAC_ENABLE) == 0) |
1079 | return false; |
1079 | return false; |
1080 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1080 | if (HAS_PCH_CPT(dev_priv->dev)) { |
1081 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1081 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) |
1082 | return false; |
1082 | return false; |
1083 | } else { |
1083 | } else { |
1084 | if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) |
1084 | if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) |
1085 | return false; |
1085 | return false; |
1086 | } |
1086 | } |
1087 | return true; |
1087 | return true; |
1088 | } |
1088 | } |
1089 | 1089 | ||
/* Warn if the PCH DP port at @reg is still driving transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}
1098 | 1098 | ||
1099 | static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, |
1099 | static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, |
1100 | enum pipe pipe, int reg) |
1100 | enum pipe pipe, int reg) |
1101 | { |
1101 | { |
1102 | u32 val = I915_READ(reg); |
1102 | u32 val = I915_READ(reg); |
1103 | WARN(hdmi_pipe_enabled(dev_priv, val, pipe), |
1103 | WARN(hdmi_pipe_enabled(dev_priv, val, pipe), |
1104 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
1104 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
1105 | reg, pipe_name(pipe)); |
1105 | reg, pipe_name(pipe)); |
1106 | } |
1106 | } |
1107 | 1107 | ||
1108 | static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, |
1108 | static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, |
1109 | enum pipe pipe) |
1109 | enum pipe pipe) |
1110 | { |
1110 | { |
1111 | int reg; |
1111 | int reg; |
1112 | u32 val; |
1112 | u32 val; |
1113 | 1113 | ||
1114 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); |
1114 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); |
1115 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); |
1115 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); |
1116 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); |
1116 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); |
1117 | 1117 | ||
1118 | reg = PCH_ADPA; |
1118 | reg = PCH_ADPA; |
1119 | val = I915_READ(reg); |
1119 | val = I915_READ(reg); |
1120 | WARN(adpa_pipe_enabled(dev_priv, val, pipe), |
1120 | WARN(adpa_pipe_enabled(dev_priv, val, pipe), |
1121 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
1121 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
1122 | pipe_name(pipe)); |
1122 | pipe_name(pipe)); |
1123 | 1123 | ||
1124 | reg = PCH_LVDS; |
1124 | reg = PCH_LVDS; |
1125 | val = I915_READ(reg); |
1125 | val = I915_READ(reg); |
1126 | WARN(lvds_pipe_enabled(dev_priv, val, pipe), |
1126 | WARN(lvds_pipe_enabled(dev_priv, val, pipe), |
1127 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1127 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1128 | pipe_name(pipe)); |
1128 | pipe_name(pipe)); |
1129 | 1129 | ||
1130 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); |
1130 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); |
1131 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); |
1131 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); |
1132 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); |
1132 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); |
1133 | } |
1133 | } |
1134 | 1134 | ||
/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note! This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	BUG_ON(dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	I915_WRITE(reg, val);
	POSTING_READ(reg);	/* flush the write before the warmup delay */
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1173 | 1173 | ||
/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);	/* make sure the disable reaches the hardware */
}
1201 | 1201 | ||
/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	int reg;
	u32 val;

	/* Only pipes 0 and 1 are handled here (presumably only two PCH PLLs) */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);	/* settle time after enabling the VCO */
}
1232 | 1232 | ||
/*
 * Disable the PCH PLL for @pipe, unless it is still selected as a clock
 * source in PCH_DPLL_SEL.  The transcoder must already be off.
 */
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int reg;
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
		pll_sel = TRANSC_DPLL_ENABLE;

	/* Only pipes 0 and 1 are handled here */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	/* Build the PCH_DPLL_SEL value that would mean "transcoder C uses
	 * this pipe's PLL" (TRANSC_* bits). */
	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;

	/* NOTE(review): appears to keep the PLL running while transcoder C
	 * is still sourced from it — confirm against PCH_DPLL_SEL docs. */
	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);	/* settle time after disabling the VCO */
}
1265 | 1265 | ||
/*
 * Enable the PCH transcoder for @pipe.  The PCH DPLL and both FDI
 * directions must already be running (asserted below), since the
 * transcoder is clocked by the PCH PLL and fed by FDI.
 */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
	}
	I915_WRITE(reg, val | TRANS_ENABLE);
	/* Wait (up to 100ms) for the transcoder state bit to latch. */
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
1297 | 1297 | ||
/*
 * Disable the PCH transcoder for @pipe.  FDI and every PCH output port
 * must be off first, since they all depend on the transcoder.
 */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
}
1319 | 1319 | ||
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
			      bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	/* Already enabled — nothing to do. */
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1364 | 1364 | ||
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	/* Already disabled — nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	/* Wait for the pipe to actually stop before returning. */
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
1401 | 1401 | ||
1402 | /* |
1402 | /* |
1403 | * Plane regs are double buffered, going from enabled->disabled needs a |
1403 | * Plane regs are double buffered, going from enabled->disabled needs a |
1404 | * trigger in order to latch. The display address reg provides this. |
1404 | * trigger in order to latch. The display address reg provides this. |
1405 | */ |
1405 | */ |
1406 | static void intel_flush_display_plane(struct drm_i915_private *dev_priv, |
1406 | static void intel_flush_display_plane(struct drm_i915_private *dev_priv, |
1407 | enum plane plane) |
1407 | enum plane plane) |
1408 | { |
1408 | { |
1409 | I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); |
1409 | I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); |
1410 | I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); |
1410 | I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); |
1411 | } |
1411 | } |
1412 | 1412 | ||
1413 | /** |
1413 | /** |
1414 | * intel_enable_plane - enable a display plane on a given pipe |
1414 | * intel_enable_plane - enable a display plane on a given pipe |
1415 | * @dev_priv: i915 private structure |
1415 | * @dev_priv: i915 private structure |
1416 | * @plane: plane to enable |
1416 | * @plane: plane to enable |
1417 | * @pipe: pipe being fed |
1417 | * @pipe: pipe being fed |
1418 | * |
1418 | * |
1419 | * Enable @plane on @pipe, making sure that @pipe is running first. |
1419 | * Enable @plane on @pipe, making sure that @pipe is running first. |
1420 | */ |
1420 | */ |
1421 | static void intel_enable_plane(struct drm_i915_private *dev_priv, |
1421 | static void intel_enable_plane(struct drm_i915_private *dev_priv, |
1422 | enum plane plane, enum pipe pipe) |
1422 | enum plane plane, enum pipe pipe) |
1423 | { |
1423 | { |
1424 | int reg; |
1424 | int reg; |
1425 | u32 val; |
1425 | u32 val; |
1426 | 1426 | ||
1427 | /* If the pipe isn't enabled, we can't pump pixels and may hang */ |
1427 | /* If the pipe isn't enabled, we can't pump pixels and may hang */ |
1428 | assert_pipe_enabled(dev_priv, pipe); |
1428 | assert_pipe_enabled(dev_priv, pipe); |
1429 | 1429 | ||
1430 | reg = DSPCNTR(plane); |
1430 | reg = DSPCNTR(plane); |
1431 | val = I915_READ(reg); |
1431 | val = I915_READ(reg); |
1432 | if (val & DISPLAY_PLANE_ENABLE) |
1432 | if (val & DISPLAY_PLANE_ENABLE) |
1433 | return; |
1433 | return; |
1434 | 1434 | ||
1435 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); |
1435 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); |
1436 | intel_flush_display_plane(dev_priv, plane); |
1436 | intel_flush_display_plane(dev_priv, plane); |
1437 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1437 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1438 | } |
1438 | } |
1439 | 1439 | ||
1440 | /** |
1440 | /** |
1441 | * intel_disable_plane - disable a display plane |
1441 | * intel_disable_plane - disable a display plane |
1442 | * @dev_priv: i915 private structure |
1442 | * @dev_priv: i915 private structure |
1443 | * @plane: plane to disable |
1443 | * @plane: plane to disable |
1444 | * @pipe: pipe consuming the data |
1444 | * @pipe: pipe consuming the data |
1445 | * |
1445 | * |
1446 | * Disable @plane; should be an independent operation. |
1446 | * Disable @plane; should be an independent operation. |
1447 | */ |
1447 | */ |
1448 | static void intel_disable_plane(struct drm_i915_private *dev_priv, |
1448 | static void intel_disable_plane(struct drm_i915_private *dev_priv, |
1449 | enum plane plane, enum pipe pipe) |
1449 | enum plane plane, enum pipe pipe) |
1450 | { |
1450 | { |
1451 | int reg; |
1451 | int reg; |
1452 | u32 val; |
1452 | u32 val; |
1453 | 1453 | ||
1454 | reg = DSPCNTR(plane); |
1454 | reg = DSPCNTR(plane); |
1455 | val = I915_READ(reg); |
1455 | val = I915_READ(reg); |
1456 | if ((val & DISPLAY_PLANE_ENABLE) == 0) |
1456 | if ((val & DISPLAY_PLANE_ENABLE) == 0) |
1457 | return; |
1457 | return; |
1458 | 1458 | ||
1459 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); |
1459 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); |
1460 | intel_flush_display_plane(dev_priv, plane); |
1460 | intel_flush_display_plane(dev_priv, plane); |
1461 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1461 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1462 | } |
1462 | } |
1463 | 1463 | ||
1464 | static void disable_pch_dp(struct drm_i915_private *dev_priv, |
1464 | static void disable_pch_dp(struct drm_i915_private *dev_priv, |
1465 | enum pipe pipe, int reg, u32 port_sel) |
1465 | enum pipe pipe, int reg, u32 port_sel) |
1466 | { |
1466 | { |
1467 | u32 val = I915_READ(reg); |
1467 | u32 val = I915_READ(reg); |
1468 | if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { |
1468 | if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { |
1469 | DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); |
1469 | DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); |
1470 | I915_WRITE(reg, val & ~DP_PORT_EN); |
1470 | I915_WRITE(reg, val & ~DP_PORT_EN); |
1471 | } |
1471 | } |
1472 | } |
1472 | } |
1473 | 1473 | ||
1474 | static void disable_pch_hdmi(struct drm_i915_private *dev_priv, |
1474 | static void disable_pch_hdmi(struct drm_i915_private *dev_priv, |
1475 | enum pipe pipe, int reg) |
1475 | enum pipe pipe, int reg) |
1476 | { |
1476 | { |
1477 | u32 val = I915_READ(reg); |
1477 | u32 val = I915_READ(reg); |
1478 | if (hdmi_pipe_enabled(dev_priv, val, pipe)) { |
1478 | if (hdmi_pipe_enabled(dev_priv, val, pipe)) { |
1479 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", |
1479 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", |
1480 | reg, pipe); |
1480 | reg, pipe); |
1481 | I915_WRITE(reg, val & ~PORT_ENABLE); |
1481 | I915_WRITE(reg, val & ~PORT_ENABLE); |
1482 | } |
1482 | } |
1483 | } |
1483 | } |
1484 | 1484 | ||
/* Disable any ports connected to this transcoder */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 reg, val;

	/* Unlock the panel power-sequencer registers before touching ports.
	 * NOTE(review): the prior PCH_PP_CONTROL value is not restored on
	 * exit — presumably intentional; confirm against the callers. */
	val = I915_READ(PCH_PP_CONTROL);
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);

	/* DisplayPort ports B/C/D that may feed this transcoder */
	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	/* Analog CRT DAC */
	reg = PCH_ADPA;
	val = I915_READ(reg);
	if (adpa_pipe_enabled(dev_priv, val, pipe))
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);

	/* LVDS panel: disable, flush the write, then give it time to settle */
	reg = PCH_LVDS;
	val = I915_READ(reg);
	if (lvds_pipe_enabled(dev_priv, val, pipe)) {
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
		POSTING_READ(reg);	/* ensure the write lands before the delay */
		udelay(100);
	}

	/* HDMI ports B/C/D */
	disable_pch_hdmi(dev_priv, pipe, HDMIB);
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
	disable_pch_hdmi(dev_priv, pipe, HDMID);
}
1516 | 1516 | ||
1517 | static void i8xx_disable_fbc(struct drm_device *dev) |
1517 | static void i8xx_disable_fbc(struct drm_device *dev) |
1518 | { |
1518 | { |
1519 | struct drm_i915_private *dev_priv = dev->dev_private; |
1519 | struct drm_i915_private *dev_priv = dev->dev_private; |
1520 | u32 fbc_ctl; |
1520 | u32 fbc_ctl; |
1521 | 1521 | ||
1522 | /* Disable compression */ |
1522 | /* Disable compression */ |
1523 | fbc_ctl = I915_READ(FBC_CONTROL); |
1523 | fbc_ctl = I915_READ(FBC_CONTROL); |
1524 | if ((fbc_ctl & FBC_CTL_EN) == 0) |
1524 | if ((fbc_ctl & FBC_CTL_EN) == 0) |
1525 | return; |
1525 | return; |
1526 | 1526 | ||
1527 | fbc_ctl &= ~FBC_CTL_EN; |
1527 | fbc_ctl &= ~FBC_CTL_EN; |
1528 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1528 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1529 | 1529 | ||
1530 | /* Wait for compressing bit to clear */ |
1530 | /* Wait for compressing bit to clear */ |
1531 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { |
1531 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { |
1532 | DRM_DEBUG_KMS("FBC idle timed out\n"); |
1532 | DRM_DEBUG_KMS("FBC idle timed out\n"); |
1533 | return; |
1533 | return; |
1534 | } |
1534 | } |
1535 | 1535 | ||
1536 | DRM_DEBUG_KMS("disabled FBC\n"); |
1536 | DRM_DEBUG_KMS("disabled FBC\n"); |
1537 | } |
1537 | } |
1538 | 1538 | ||
/* Program and enable 8xx/9xx-style framebuffer compression for @crtc. */
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	/* Compressed-buffer pitch, capped at the framebuffer's own pitch. */
	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	/* NOTE(review): 0x2fff is a non-contiguous mask — looks like a typo
	 * for 0x3fff; confirm against the hardware register definition. */
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}
1581 | 1581 | ||
1582 | static bool i8xx_fbc_enabled(struct drm_device *dev) |
1582 | static bool i8xx_fbc_enabled(struct drm_device *dev) |
1583 | { |
1583 | { |
1584 | struct drm_i915_private *dev_priv = dev->dev_private; |
1584 | struct drm_i915_private *dev_priv = dev->dev_private; |
1585 | 1585 | ||
1586 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; |
1586 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; |
1587 | } |
1587 | } |
1588 | 1588 | ||
1589 | static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1589 | static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1590 | { |
1590 | { |
1591 | struct drm_device *dev = crtc->dev; |
1591 | struct drm_device *dev = crtc->dev; |
1592 | struct drm_i915_private *dev_priv = dev->dev_private; |
1592 | struct drm_i915_private *dev_priv = dev->dev_private; |
1593 | struct drm_framebuffer *fb = crtc->fb; |
1593 | struct drm_framebuffer *fb = crtc->fb; |
1594 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1594 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
1595 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1595 | struct drm_i915_gem_object *obj = intel_fb->obj; |
1596 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1596 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1597 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1597 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
1598 | unsigned long stall_watermark = 200; |
1598 | unsigned long stall_watermark = 200; |
1599 | u32 dpfc_ctl; |
1599 | u32 dpfc_ctl; |
1600 | 1600 | ||
1601 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; |
1601 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; |
1602 | dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; |
1602 | dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; |
1603 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); |
1603 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); |
1604 | 1604 | ||
1605 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | |
1605 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | |
1606 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
1606 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | |
1607 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); |
1607 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); |
1608 | I915_WRITE(DPFC_FENCE_YOFF, crtc->y); |
1608 | I915_WRITE(DPFC_FENCE_YOFF, crtc->y); |
1609 | 1609 | ||
1610 | /* enable it... */ |
1610 | /* enable it... */ |
1611 | I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); |
1611 | I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); |
1612 | 1612 | ||
1613 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
1613 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
1614 | } |
1614 | } |
1615 | 1615 | ||
1616 | static void g4x_disable_fbc(struct drm_device *dev) |
1616 | static void g4x_disable_fbc(struct drm_device *dev) |
1617 | { |
1617 | { |
1618 | struct drm_i915_private *dev_priv = dev->dev_private; |
1618 | struct drm_i915_private *dev_priv = dev->dev_private; |
1619 | u32 dpfc_ctl; |
1619 | u32 dpfc_ctl; |
1620 | 1620 | ||
1621 | /* Disable compression */ |
1621 | /* Disable compression */ |
1622 | dpfc_ctl = I915_READ(DPFC_CONTROL); |
1622 | dpfc_ctl = I915_READ(DPFC_CONTROL); |
1623 | if (dpfc_ctl & DPFC_CTL_EN) { |
1623 | if (dpfc_ctl & DPFC_CTL_EN) { |
1624 | dpfc_ctl &= ~DPFC_CTL_EN; |
1624 | dpfc_ctl &= ~DPFC_CTL_EN; |
1625 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); |
1625 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); |
1626 | 1626 | ||
1627 | DRM_DEBUG_KMS("disabled FBC\n"); |
1627 | DRM_DEBUG_KMS("disabled FBC\n"); |
1628 | } |
1628 | } |
1629 | } |
1629 | } |
1630 | 1630 | ||
1631 | static bool g4x_fbc_enabled(struct drm_device *dev) |
1631 | static bool g4x_fbc_enabled(struct drm_device *dev) |
1632 | { |
1632 | { |
1633 | struct drm_i915_private *dev_priv = dev->dev_private; |
1633 | struct drm_i915_private *dev_priv = dev->dev_private; |
1634 | 1634 | ||
1635 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; |
1635 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; |
1636 | } |
1636 | } |
1637 | 1637 | ||
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	/* Set the bit in the high (lock/mask) half first — presumably this
	 * makes the corresponding low-half bit writable (masked-register
	 * style); NOTE(review): confirm against the ECOSKPD register docs. */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* ...then set the FBC-notify bit itself... */
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* ...and clear the mask bit again so later writes leave it alone. */
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	/* Flush the writes before releasing forcewake. */
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
1657 | 1657 | ||
/* Program and enable Ironlake+ (ILK_DPFC) framebuffer compression for @crtc;
 * @interval is the recompression timer count. */
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	/* Preserve only the reserved bits of the old control value. */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	/* Point the hardware at the render-target base in the GTT. */
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		/* Sandybridge additionally needs the CPU fence programmed
		 * and the blitter taught to notify FBC of its writes. */
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
1695 | 1695 | ||
1696 | static void ironlake_disable_fbc(struct drm_device *dev) |
1696 | static void ironlake_disable_fbc(struct drm_device *dev) |
1697 | { |
1697 | { |
1698 | struct drm_i915_private *dev_priv = dev->dev_private; |
1698 | struct drm_i915_private *dev_priv = dev->dev_private; |
1699 | u32 dpfc_ctl; |
1699 | u32 dpfc_ctl; |
1700 | 1700 | ||
1701 | /* Disable compression */ |
1701 | /* Disable compression */ |
1702 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); |
1702 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); |
1703 | if (dpfc_ctl & DPFC_CTL_EN) { |
1703 | if (dpfc_ctl & DPFC_CTL_EN) { |
1704 | dpfc_ctl &= ~DPFC_CTL_EN; |
1704 | dpfc_ctl &= ~DPFC_CTL_EN; |
1705 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); |
1705 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); |
1706 | 1706 | ||
1707 | DRM_DEBUG_KMS("disabled FBC\n"); |
1707 | DRM_DEBUG_KMS("disabled FBC\n"); |
1708 | } |
1708 | } |
1709 | } |
1709 | } |
1710 | 1710 | ||
1711 | static bool ironlake_fbc_enabled(struct drm_device *dev) |
1711 | static bool ironlake_fbc_enabled(struct drm_device *dev) |
1712 | { |
1712 | { |
1713 | struct drm_i915_private *dev_priv = dev->dev_private; |
1713 | struct drm_i915_private *dev_priv = dev->dev_private; |
1714 | 1714 | ||
1715 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; |
1715 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; |
1716 | } |
1716 | } |
1717 | 1717 | ||
1718 | bool intel_fbc_enabled(struct drm_device *dev) |
1718 | bool intel_fbc_enabled(struct drm_device *dev) |
1719 | { |
1719 | { |
1720 | struct drm_i915_private *dev_priv = dev->dev_private; |
1720 | struct drm_i915_private *dev_priv = dev->dev_private; |
1721 | 1721 | ||
1722 | if (!dev_priv->display.fbc_enabled) |
1722 | if (!dev_priv->display.fbc_enabled) |
1723 | return false; |
1723 | return false; |
1724 | 1724 | ||
1725 | return dev_priv->display.fbc_enabled(dev); |
1725 | return dev_priv->display.fbc_enabled(dev); |
1726 | } |
1726 | } |
1727 | 1727 | ||
1728 | 1728 | ||
1729 | 1729 | ||
1730 | 1730 | ||
1731 | 1731 | ||
1732 | 1732 | ||
1733 | 1733 | ||
1734 | 1734 | ||
1735 | 1735 | ||
1736 | 1736 | ||
/* Request FBC to be enabled on @crtc after a short settling delay. */
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* No per-platform enable hook: FBC unsupported here. */
	if (!dev_priv->display.enable_fbc)
		return;

	/* NOTE(review): the entire delayed-work path below is commented out
	 * in this port, so this function currently only emits the debug
	 * message and never actually enables FBC — confirm whether that is
	 * the intended state of the port. */
//	intel_cancel_fbc_work(dev_priv);

//	work = kzalloc(sizeof *work, GFP_KERNEL);
//	if (work == NULL) {
//		dev_priv->display.enable_fbc(crtc, interval);
//		return;
//	}

//	work->crtc = crtc;
//	work->fb = crtc->fb;
//	work->interval = interval;
//	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

//	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
//	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
1776 | 1776 | ||
1777 | void intel_disable_fbc(struct drm_device *dev) |
1777 | void intel_disable_fbc(struct drm_device *dev) |
1778 | { |
1778 | { |
1779 | struct drm_i915_private *dev_priv = dev->dev_private; |
1779 | struct drm_i915_private *dev_priv = dev->dev_private; |
1780 | 1780 | ||
1781 | // intel_cancel_fbc_work(dev_priv); |
1781 | // intel_cancel_fbc_work(dev_priv); |
1782 | 1782 | ||
1783 | if (!dev_priv->display.disable_fbc) |
1783 | if (!dev_priv->display.disable_fbc) |
1784 | return; |
1784 | return; |
1785 | 1785 | ||
1786 | dev_priv->display.disable_fbc(dev); |
1786 | dev_priv->display.disable_fbc(dev); |
1787 | dev_priv->cfb_plane = -1; |
1787 | dev_priv->cfb_plane = -1; |
1788 | } |
1788 | } |
1789 | 1789 | ||
1790 | /** |
1790 | /** |
1791 | * intel_update_fbc - enable/disable FBC as needed |
1791 | * intel_update_fbc - enable/disable FBC as needed |
1792 | * @dev: the drm_device |
1792 | * @dev: the drm_device |
1793 | * |
1793 | * |
1794 | * Set up the framebuffer compression hardware at mode set time. We |
1794 | * Set up the framebuffer compression hardware at mode set time. We |
1795 | * enable it if possible: |
1795 | * enable it if possible: |
1796 | * - plane A only (on pre-965) |
1796 | * - plane A only (on pre-965) |
1797 | * - no pixel mulitply/line duplication |
1797 | * - no pixel mulitply/line duplication |
1798 | * - no alpha buffer discard |
1798 | * - no alpha buffer discard |
1799 | * - no dual wide |
1799 | * - no dual wide |
1800 | * - framebuffer <= 2048 in width, 1536 in height |
1800 | * - framebuffer <= 2048 in width, 1536 in height |
1801 | * |
1801 | * |
1802 | * We can't assume that any compression will take place (worst case), |
1802 | * We can't assume that any compression will take place (worst case), |
1803 | * so the compressed buffer has to be the same size as the uncompressed |
1803 | * so the compressed buffer has to be the same size as the uncompressed |
1804 | * one. It also must reside (along with the line length buffer) in |
1804 | * one. It also must reside (along with the line length buffer) in |
1805 | * stolen memory. |
1805 | * stolen memory. |
1806 | * |
1806 | * |
1807 | * We need to enable/disable FBC on a global basis. |
1807 | * We need to enable/disable FBC on a global basis. |
1808 | */ |
1808 | */ |
1809 | static void intel_update_fbc(struct drm_device *dev) |
1809 | static void intel_update_fbc(struct drm_device *dev) |
1810 | { |
1810 | { |
1811 | struct drm_i915_private *dev_priv = dev->dev_private; |
1811 | struct drm_i915_private *dev_priv = dev->dev_private; |
1812 | struct drm_crtc *crtc = NULL, *tmp_crtc; |
1812 | struct drm_crtc *crtc = NULL, *tmp_crtc; |
1813 | struct intel_crtc *intel_crtc; |
1813 | struct intel_crtc *intel_crtc; |
1814 | struct drm_framebuffer *fb; |
1814 | struct drm_framebuffer *fb; |
1815 | struct intel_framebuffer *intel_fb; |
1815 | struct intel_framebuffer *intel_fb; |
1816 | struct drm_i915_gem_object *obj; |
1816 | struct drm_i915_gem_object *obj; |
1817 | int enable_fbc; |
1817 | int enable_fbc; |
1818 | 1818 | ||
1819 | DRM_DEBUG_KMS("\n"); |
1819 | DRM_DEBUG_KMS("\n"); |
1820 | 1820 | ||
1821 | if (!i915_powersave) |
1821 | if (!i915_powersave) |
1822 | return; |
1822 | return; |
1823 | 1823 | ||
1824 | if (!I915_HAS_FBC(dev)) |
1824 | if (!I915_HAS_FBC(dev)) |
1825 | return; |
1825 | return; |
1826 | 1826 | ||
1827 | /* |
1827 | /* |
1828 | * If FBC is already on, we just have to verify that we can |
1828 | * If FBC is already on, we just have to verify that we can |
1829 | * keep it that way... |
1829 | * keep it that way... |
1830 | * Need to disable if: |
1830 | * Need to disable if: |
1831 | * - more than one pipe is active |
1831 | * - more than one pipe is active |
1832 | * - changing FBC params (stride, fence, mode) |
1832 | * - changing FBC params (stride, fence, mode) |
1833 | * - new fb is too large to fit in compressed buffer |
1833 | * - new fb is too large to fit in compressed buffer |
1834 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
1834 | * - going to an unsupported config (interlace, pixel multiply, etc.) |
1835 | */ |
1835 | */ |
1836 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { |
1836 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { |
1837 | if (tmp_crtc->enabled && tmp_crtc->fb) { |
1837 | if (tmp_crtc->enabled && tmp_crtc->fb) { |
1838 | if (crtc) { |
1838 | if (crtc) { |
1839 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); |
1839 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); |
1840 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; |
1840 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; |
1841 | goto out_disable; |
1841 | goto out_disable; |
1842 | } |
1842 | } |
1843 | crtc = tmp_crtc; |
1843 | crtc = tmp_crtc; |
1844 | } |
1844 | } |
1845 | } |
1845 | } |
1846 | 1846 | ||
1847 | if (!crtc || crtc->fb == NULL) { |
1847 | if (!crtc || crtc->fb == NULL) { |
1848 | DRM_DEBUG_KMS("no output, disabling\n"); |
1848 | DRM_DEBUG_KMS("no output, disabling\n"); |
1849 | dev_priv->no_fbc_reason = FBC_NO_OUTPUT; |
1849 | dev_priv->no_fbc_reason = FBC_NO_OUTPUT; |
1850 | goto out_disable; |
1850 | goto out_disable; |
1851 | } |
1851 | } |
1852 | 1852 | ||
1853 | intel_crtc = to_intel_crtc(crtc); |
1853 | intel_crtc = to_intel_crtc(crtc); |
1854 | fb = crtc->fb; |
1854 | fb = crtc->fb; |
1855 | intel_fb = to_intel_framebuffer(fb); |
1855 | intel_fb = to_intel_framebuffer(fb); |
1856 | obj = intel_fb->obj; |
1856 | obj = intel_fb->obj; |
1857 | 1857 | ||
1858 | enable_fbc = i915_enable_fbc; |
1858 | enable_fbc = i915_enable_fbc; |
1859 | if (enable_fbc < 0) { |
1859 | if (enable_fbc < 0) { |
1860 | DRM_DEBUG_KMS("fbc set to per-chip default\n"); |
1860 | DRM_DEBUG_KMS("fbc set to per-chip default\n"); |
1861 | enable_fbc = 1; |
1861 | enable_fbc = 1; |
1862 | if (INTEL_INFO(dev)->gen <= 5) |
1862 | if (INTEL_INFO(dev)->gen <= 6) |
1863 | enable_fbc = 0; |
1863 | enable_fbc = 0; |
1864 | } |
1864 | } |
1865 | if (!enable_fbc) { |
1865 | if (!enable_fbc) { |
1866 | DRM_DEBUG_KMS("fbc disabled per module param\n"); |
1866 | DRM_DEBUG_KMS("fbc disabled per module param\n"); |
1867 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; |
1867 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; |
1868 | goto out_disable; |
1868 | goto out_disable; |
1869 | } |
1869 | } |
1870 | if (intel_fb->obj->base.size > dev_priv->cfb_size) { |
1870 | if (intel_fb->obj->base.size > dev_priv->cfb_size) { |
1871 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
1871 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
1872 | "compression\n"); |
1872 | "compression\n"); |
1873 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
1873 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
1874 | goto out_disable; |
1874 | goto out_disable; |
1875 | } |
1875 | } |
1876 | if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || |
1876 | if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || |
1877 | (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { |
1877 | (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { |
1878 | DRM_DEBUG_KMS("mode incompatible with compression, " |
1878 | DRM_DEBUG_KMS("mode incompatible with compression, " |
1879 | "disabling\n"); |
1879 | "disabling\n"); |
1880 | dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; |
1880 | dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; |
1881 | goto out_disable; |
1881 | goto out_disable; |
1882 | } |
1882 | } |
1883 | if ((crtc->mode.hdisplay > 2048) || |
1883 | if ((crtc->mode.hdisplay > 2048) || |
1884 | (crtc->mode.vdisplay > 1536)) { |
1884 | (crtc->mode.vdisplay > 1536)) { |
1885 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); |
1885 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); |
1886 | dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; |
1886 | dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; |
1887 | goto out_disable; |
1887 | goto out_disable; |
1888 | } |
1888 | } |
1889 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { |
1889 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { |
1890 | DRM_DEBUG_KMS("plane not 0, disabling compression\n"); |
1890 | DRM_DEBUG_KMS("plane not 0, disabling compression\n"); |
1891 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; |
1891 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; |
1892 | goto out_disable; |
1892 | goto out_disable; |
1893 | } |
1893 | } |
1894 | 1894 | ||
1895 | /* The use of a CPU fence is mandatory in order to detect writes |
1895 | /* The use of a CPU fence is mandatory in order to detect writes |
1896 | * by the CPU to the scanout and trigger updates to the FBC. |
1896 | * by the CPU to the scanout and trigger updates to the FBC. |
1897 | */ |
1897 | */ |
1898 | // if (obj->tiling_mode != I915_TILING_X || |
1898 | // if (obj->tiling_mode != I915_TILING_X || |
1899 | // obj->fence_reg == I915_FENCE_REG_NONE) { |
1899 | // obj->fence_reg == I915_FENCE_REG_NONE) { |
1900 | // DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); |
1900 | // DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); |
1901 | // dev_priv->no_fbc_reason = FBC_NOT_TILED; |
1901 | // dev_priv->no_fbc_reason = FBC_NOT_TILED; |
1902 | // goto out_disable; |
1902 | // goto out_disable; |
1903 | // } |
1903 | // } |
1904 | 1904 | ||
1905 | /* If the kernel debugger is active, always disable compression */ |
1905 | /* If the kernel debugger is active, always disable compression */ |
1906 | if (in_dbg_master()) |
1906 | if (in_dbg_master()) |
1907 | goto out_disable; |
1907 | goto out_disable; |
1908 | 1908 | ||
1909 | /* If the scanout has not changed, don't modify the FBC settings. |
1909 | /* If the scanout has not changed, don't modify the FBC settings. |
1910 | * Note that we make the fundamental assumption that the fb->obj |
1910 | * Note that we make the fundamental assumption that the fb->obj |
1911 | * cannot be unpinned (and have its GTT offset and fence revoked) |
1911 | * cannot be unpinned (and have its GTT offset and fence revoked) |
1912 | * without first being decoupled from the scanout and FBC disabled. |
1912 | * without first being decoupled from the scanout and FBC disabled. |
1913 | */ |
1913 | */ |
1914 | if (dev_priv->cfb_plane == intel_crtc->plane && |
1914 | if (dev_priv->cfb_plane == intel_crtc->plane && |
1915 | dev_priv->cfb_fb == fb->base.id && |
1915 | dev_priv->cfb_fb == fb->base.id && |
1916 | dev_priv->cfb_y == crtc->y) |
1916 | dev_priv->cfb_y == crtc->y) |
1917 | return; |
1917 | return; |
1918 | 1918 | ||
1919 | if (intel_fbc_enabled(dev)) { |
1919 | if (intel_fbc_enabled(dev)) { |
1920 | /* We update FBC along two paths, after changing fb/crtc |
1920 | /* We update FBC along two paths, after changing fb/crtc |
1921 | * configuration (modeswitching) and after page-flipping |
1921 | * configuration (modeswitching) and after page-flipping |
1922 | * finishes. For the latter, we know that not only did |
1922 | * finishes. For the latter, we know that not only did |
1923 | * we disable the FBC at the start of the page-flip |
1923 | * we disable the FBC at the start of the page-flip |
1924 | * sequence, but also more than one vblank has passed. |
1924 | * sequence, but also more than one vblank has passed. |
1925 | * |
1925 | * |
1926 | * For the former case of modeswitching, it is possible |
1926 | * For the former case of modeswitching, it is possible |
1927 | * to switch between two FBC valid configurations |
1927 | * to switch between two FBC valid configurations |
1928 | * instantaneously so we do need to disable the FBC |
1928 | * instantaneously so we do need to disable the FBC |
1929 | * before we can modify its control registers. We also |
1929 | * before we can modify its control registers. We also |
1930 | * have to wait for the next vblank for that to take |
1930 | * have to wait for the next vblank for that to take |
1931 | * effect. However, since we delay enabling FBC we can |
1931 | * effect. However, since we delay enabling FBC we can |
1932 | * assume that a vblank has passed since disabling and |
1932 | * assume that a vblank has passed since disabling and |
1933 | * that we can safely alter the registers in the deferred |
1933 | * that we can safely alter the registers in the deferred |
1934 | * callback. |
1934 | * callback. |
1935 | * |
1935 | * |
1936 | * In the scenario that we go from a valid to invalid |
1936 | * In the scenario that we go from a valid to invalid |
1937 | * and then back to valid FBC configuration we have |
1937 | * and then back to valid FBC configuration we have |
1938 | * no strict enforcement that a vblank occurred since |
1938 | * no strict enforcement that a vblank occurred since |
1939 | * disabling the FBC. However, along all current pipe |
1939 | * disabling the FBC. However, along all current pipe |
1940 | * disabling paths we do need to wait for a vblank at |
1940 | * disabling paths we do need to wait for a vblank at |
1941 | * some point. And we wait before enabling FBC anyway. |
1941 | * some point. And we wait before enabling FBC anyway. |
1942 | */ |
1942 | */ |
1943 | DRM_DEBUG_KMS("disabling active FBC for update\n"); |
1943 | DRM_DEBUG_KMS("disabling active FBC for update\n"); |
1944 | intel_disable_fbc(dev); |
1944 | intel_disable_fbc(dev); |
1945 | } |
1945 | } |
1946 | 1946 | ||
1947 | intel_enable_fbc(crtc, 500); |
1947 | intel_enable_fbc(crtc, 500); |
1948 | return; |
1948 | return; |
1949 | 1949 | ||
1950 | out_disable: |
1950 | out_disable: |
1951 | /* Multiple disables should be harmless */ |
1951 | /* Multiple disables should be harmless */ |
1952 | if (intel_fbc_enabled(dev)) { |
1952 | if (intel_fbc_enabled(dev)) { |
1953 | DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); |
1953 | DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); |
1954 | intel_disable_fbc(dev); |
1954 | intel_disable_fbc(dev); |
1955 | } |
1955 | } |
1956 | } |
1956 | } |
1957 | 1957 | ||
1958 | int |
1958 | int |
1959 | intel_pin_and_fence_fb_obj(struct drm_device *dev, |
1959 | intel_pin_and_fence_fb_obj(struct drm_device *dev, |
1960 | struct drm_i915_gem_object *obj, |
1960 | struct drm_i915_gem_object *obj, |
1961 | struct intel_ring_buffer *pipelined) |
1961 | struct intel_ring_buffer *pipelined) |
1962 | { |
1962 | { |
1963 | struct drm_i915_private *dev_priv = dev->dev_private; |
1963 | struct drm_i915_private *dev_priv = dev->dev_private; |
1964 | u32 alignment; |
1964 | u32 alignment; |
1965 | int ret; |
1965 | int ret; |
1966 | 1966 | ||
1967 | switch (obj->tiling_mode) { |
1967 | switch (obj->tiling_mode) { |
1968 | case I915_TILING_NONE: |
1968 | case I915_TILING_NONE: |
1969 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1969 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1970 | alignment = 128 * 1024; |
1970 | alignment = 128 * 1024; |
1971 | else if (INTEL_INFO(dev)->gen >= 4) |
1971 | else if (INTEL_INFO(dev)->gen >= 4) |
1972 | alignment = 4 * 1024; |
1972 | alignment = 4 * 1024; |
1973 | else |
1973 | else |
1974 | alignment = 64 * 1024; |
1974 | alignment = 64 * 1024; |
1975 | break; |
1975 | break; |
1976 | case I915_TILING_X: |
1976 | case I915_TILING_X: |
1977 | /* pin() will align the object as required by fence */ |
1977 | /* pin() will align the object as required by fence */ |
1978 | alignment = 0; |
1978 | alignment = 0; |
1979 | break; |
1979 | break; |
1980 | case I915_TILING_Y: |
1980 | case I915_TILING_Y: |
1981 | /* FIXME: Is this true? */ |
1981 | /* FIXME: Is this true? */ |
1982 | DRM_ERROR("Y tiled not allowed for scan out buffers\n"); |
1982 | DRM_ERROR("Y tiled not allowed for scan out buffers\n"); |
1983 | return -EINVAL; |
1983 | return -EINVAL; |
1984 | default: |
1984 | default: |
1985 | BUG(); |
1985 | BUG(); |
1986 | } |
1986 | } |
1987 | 1987 | ||
1988 | dev_priv->mm.interruptible = false; |
1988 | dev_priv->mm.interruptible = false; |
1989 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); |
1989 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); |
1990 | if (ret) |
1990 | if (ret) |
1991 | goto err_interruptible; |
1991 | goto err_interruptible; |
1992 | 1992 | ||
1993 | /* Install a fence for tiled scan-out. Pre-i965 always needs a |
1993 | /* Install a fence for tiled scan-out. Pre-i965 always needs a |
1994 | * fence, whereas 965+ only requires a fence if using |
1994 | * fence, whereas 965+ only requires a fence if using |
1995 | * framebuffer compression. For simplicity, we always install |
1995 | * framebuffer compression. For simplicity, we always install |
1996 | * a fence as the cost is not that onerous. |
1996 | * a fence as the cost is not that onerous. |
1997 | */ |
1997 | */ |
1998 | // if (obj->tiling_mode != I915_TILING_NONE) { |
1998 | // if (obj->tiling_mode != I915_TILING_NONE) { |
1999 | // ret = i915_gem_object_get_fence(obj, pipelined); |
1999 | // ret = i915_gem_object_get_fence(obj, pipelined); |
2000 | // if (ret) |
2000 | // if (ret) |
2001 | // goto err_unpin; |
2001 | // goto err_unpin; |
2002 | // } |
2002 | // } |
2003 | 2003 | ||
2004 | dev_priv->mm.interruptible = true; |
2004 | dev_priv->mm.interruptible = true; |
2005 | return 0; |
2005 | return 0; |
2006 | 2006 | ||
2007 | err_unpin: |
2007 | err_unpin: |
2008 | i915_gem_object_unpin(obj); |
2008 | i915_gem_object_unpin(obj); |
2009 | err_interruptible: |
2009 | err_interruptible: |
2010 | dev_priv->mm.interruptible = true; |
2010 | dev_priv->mm.interruptible = true; |
2011 | return ret; |
2011 | return ret; |
2012 | } |
2012 | } |
2013 | 2013 | ||
2014 | static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
2014 | static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
2015 | int x, int y) |
2015 | int x, int y) |
2016 | { |
2016 | { |
2017 | struct drm_device *dev = crtc->dev; |
2017 | struct drm_device *dev = crtc->dev; |
2018 | struct drm_i915_private *dev_priv = dev->dev_private; |
2018 | struct drm_i915_private *dev_priv = dev->dev_private; |
2019 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2019 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2020 | struct intel_framebuffer *intel_fb; |
2020 | struct intel_framebuffer *intel_fb; |
2021 | struct drm_i915_gem_object *obj; |
2021 | struct drm_i915_gem_object *obj; |
2022 | int plane = intel_crtc->plane; |
2022 | int plane = intel_crtc->plane; |
2023 | unsigned long Start, Offset; |
2023 | unsigned long Start, Offset; |
2024 | u32 dspcntr; |
2024 | u32 dspcntr; |
2025 | u32 reg; |
2025 | u32 reg; |
2026 | 2026 | ||
2027 | switch (plane) { |
2027 | switch (plane) { |
2028 | case 0: |
2028 | case 0: |
2029 | case 1: |
2029 | case 1: |
2030 | break; |
2030 | break; |
2031 | default: |
2031 | default: |
2032 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); |
2032 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); |
2033 | return -EINVAL; |
2033 | return -EINVAL; |
2034 | } |
2034 | } |
2035 | 2035 | ||
2036 | intel_fb = to_intel_framebuffer(fb); |
2036 | intel_fb = to_intel_framebuffer(fb); |
2037 | obj = intel_fb->obj; |
2037 | obj = intel_fb->obj; |
2038 | 2038 | ||
2039 | reg = DSPCNTR(plane); |
2039 | reg = DSPCNTR(plane); |
2040 | dspcntr = I915_READ(reg); |
2040 | dspcntr = I915_READ(reg); |
2041 | /* Mask out pixel format bits in case we change it */ |
2041 | /* Mask out pixel format bits in case we change it */ |
2042 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; |
2042 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; |
2043 | switch (fb->bits_per_pixel) { |
2043 | switch (fb->bits_per_pixel) { |
2044 | case 8: |
2044 | case 8: |
2045 | dspcntr |= DISPPLANE_8BPP; |
2045 | dspcntr |= DISPPLANE_8BPP; |
2046 | break; |
2046 | break; |
2047 | case 16: |
2047 | case 16: |
2048 | if (fb->depth == 15) |
2048 | if (fb->depth == 15) |
2049 | dspcntr |= DISPPLANE_15_16BPP; |
2049 | dspcntr |= DISPPLANE_15_16BPP; |
2050 | else |
2050 | else |
2051 | dspcntr |= DISPPLANE_16BPP; |
2051 | dspcntr |= DISPPLANE_16BPP; |
2052 | break; |
2052 | break; |
2053 | case 24: |
2053 | case 24: |
2054 | case 32: |
2054 | case 32: |
2055 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; |
2055 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; |
2056 | break; |
2056 | break; |
2057 | default: |
2057 | default: |
2058 | DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); |
2058 | DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); |
2059 | return -EINVAL; |
2059 | return -EINVAL; |
2060 | } |
2060 | } |
2061 | if (INTEL_INFO(dev)->gen >= 4) { |
2061 | if (INTEL_INFO(dev)->gen >= 4) { |
2062 | if (obj->tiling_mode != I915_TILING_NONE) |
2062 | if (obj->tiling_mode != I915_TILING_NONE) |
2063 | dspcntr |= DISPPLANE_TILED; |
2063 | dspcntr |= DISPPLANE_TILED; |
2064 | else |
2064 | else |
2065 | dspcntr &= ~DISPPLANE_TILED; |
2065 | dspcntr &= ~DISPPLANE_TILED; |
2066 | } |
2066 | } |
2067 | 2067 | ||
2068 | I915_WRITE(reg, dspcntr); |
2068 | I915_WRITE(reg, dspcntr); |
2069 | 2069 | ||
2070 | Start = obj->gtt_offset; |
2070 | Start = obj->gtt_offset; |
2071 | Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); |
2071 | Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); |
2072 | 2072 | ||
2073 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
2073 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
2074 | Start, Offset, x, y, fb->pitches[0]); |
2074 | Start, Offset, x, y, fb->pitches[0]); |
2075 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
2075 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
2076 | if (INTEL_INFO(dev)->gen >= 4) { |
2076 | if (INTEL_INFO(dev)->gen >= 4) { |
2077 | I915_WRITE(DSPSURF(plane), Start); |
2077 | I915_WRITE(DSPSURF(plane), Start); |
2078 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); |
2078 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); |
2079 | I915_WRITE(DSPADDR(plane), Offset); |
2079 | I915_WRITE(DSPADDR(plane), Offset); |
2080 | } else |
2080 | } else |
2081 | I915_WRITE(DSPADDR(plane), Start + Offset); |
2081 | I915_WRITE(DSPADDR(plane), Start + Offset); |
2082 | POSTING_READ(reg); |
2082 | POSTING_READ(reg); |
2083 | 2083 | ||
2084 | return 0; |
2084 | return 0; |
2085 | } |
2085 | } |
2086 | 2086 | ||
2087 | static int ironlake_update_plane(struct drm_crtc *crtc, |
2087 | static int ironlake_update_plane(struct drm_crtc *crtc, |
2088 | struct drm_framebuffer *fb, int x, int y) |
2088 | struct drm_framebuffer *fb, int x, int y) |
2089 | { |
2089 | { |
2090 | struct drm_device *dev = crtc->dev; |
2090 | struct drm_device *dev = crtc->dev; |
2091 | struct drm_i915_private *dev_priv = dev->dev_private; |
2091 | struct drm_i915_private *dev_priv = dev->dev_private; |
2092 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2092 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2093 | struct intel_framebuffer *intel_fb; |
2093 | struct intel_framebuffer *intel_fb; |
2094 | struct drm_i915_gem_object *obj; |
2094 | struct drm_i915_gem_object *obj; |
2095 | int plane = intel_crtc->plane; |
2095 | int plane = intel_crtc->plane; |
2096 | unsigned long Start, Offset; |
2096 | unsigned long Start, Offset; |
2097 | u32 dspcntr; |
2097 | u32 dspcntr; |
2098 | u32 reg; |
2098 | u32 reg; |
2099 | 2099 | ||
2100 | switch (plane) { |
2100 | switch (plane) { |
2101 | case 0: |
2101 | case 0: |
2102 | case 1: |
2102 | case 1: |
2103 | case 2: |
2103 | case 2: |
2104 | break; |
2104 | break; |
2105 | default: |
2105 | default: |
2106 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); |
2106 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); |
2107 | return -EINVAL; |
2107 | return -EINVAL; |
2108 | } |
2108 | } |
2109 | 2109 | ||
2110 | intel_fb = to_intel_framebuffer(fb); |
2110 | intel_fb = to_intel_framebuffer(fb); |
2111 | obj = intel_fb->obj; |
2111 | obj = intel_fb->obj; |
2112 | 2112 | ||
2113 | reg = DSPCNTR(plane); |
2113 | reg = DSPCNTR(plane); |
2114 | dspcntr = I915_READ(reg); |
2114 | dspcntr = I915_READ(reg); |
2115 | /* Mask out pixel format bits in case we change it */ |
2115 | /* Mask out pixel format bits in case we change it */ |
2116 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; |
2116 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; |
2117 | switch (fb->bits_per_pixel) { |
2117 | switch (fb->bits_per_pixel) { |
2118 | case 8: |
2118 | case 8: |
2119 | dspcntr |= DISPPLANE_8BPP; |
2119 | dspcntr |= DISPPLANE_8BPP; |
2120 | break; |
2120 | break; |
2121 | case 16: |
2121 | case 16: |
2122 | if (fb->depth != 16) |
2122 | if (fb->depth != 16) |
2123 | return -EINVAL; |
2123 | return -EINVAL; |
2124 | 2124 | ||
2125 | dspcntr |= DISPPLANE_16BPP; |
2125 | dspcntr |= DISPPLANE_16BPP; |
2126 | break; |
2126 | break; |
2127 | case 24: |
2127 | case 24: |
2128 | case 32: |
2128 | case 32: |
2129 | if (fb->depth == 24) |
2129 | if (fb->depth == 24) |
2130 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; |
2130 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; |
2131 | else if (fb->depth == 30) |
2131 | else if (fb->depth == 30) |
2132 | dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; |
2132 | dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; |
2133 | else |
2133 | else |
2134 | return -EINVAL; |
2134 | return -EINVAL; |
2135 | break; |
2135 | break; |
2136 | default: |
2136 | default: |
2137 | DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); |
2137 | DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); |
2138 | return -EINVAL; |
2138 | return -EINVAL; |
2139 | } |
2139 | } |
2140 | 2140 | ||
2141 | // if (obj->tiling_mode != I915_TILING_NONE) |
2141 | // if (obj->tiling_mode != I915_TILING_NONE) |
2142 | // dspcntr |= DISPPLANE_TILED; |
2142 | // dspcntr |= DISPPLANE_TILED; |
2143 | // else |
2143 | // else |
2144 | dspcntr &= ~DISPPLANE_TILED; |
2144 | dspcntr &= ~DISPPLANE_TILED; |
2145 | 2145 | ||
2146 | /* must disable */ |
2146 | /* must disable */ |
2147 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; |
2147 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; |
2148 | 2148 | ||
2149 | I915_WRITE(reg, dspcntr); |
2149 | I915_WRITE(reg, dspcntr); |
2150 | 2150 | ||
2151 | Start = obj->gtt_offset; |
2151 | Start = obj->gtt_offset; |
2152 | Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); |
2152 | Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); |
2153 | 2153 | ||
2154 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
2154 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
2155 | Start, Offset, x, y, fb->pitches[0]); |
2155 | Start, Offset, x, y, fb->pitches[0]); |
2156 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
2156 | I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); |
2157 | I915_WRITE(DSPSURF(plane), Start); |
2157 | I915_WRITE(DSPSURF(plane), Start); |
2158 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); |
2158 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); |
2159 | I915_WRITE(DSPADDR(plane), Offset); |
2159 | I915_WRITE(DSPADDR(plane), Offset); |
2160 | POSTING_READ(reg); |
2160 | POSTING_READ(reg); |
2161 | 2161 | ||
2162 | return 0; |
2162 | return 0; |
2163 | } |
2163 | } |
2164 | 2164 | ||
2165 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ |
2165 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ |
2166 | static int |
2166 | static int |
2167 | intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
2167 | intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
2168 | int x, int y, enum mode_set_atomic state) |
2168 | int x, int y, enum mode_set_atomic state) |
2169 | { |
2169 | { |
2170 | struct drm_device *dev = crtc->dev; |
2170 | struct drm_device *dev = crtc->dev; |
2171 | struct drm_i915_private *dev_priv = dev->dev_private; |
2171 | struct drm_i915_private *dev_priv = dev->dev_private; |
2172 | int ret; |
2172 | int ret; |
2173 | - | ||
2174 | ENTER(); |
- | |
2175 | 2173 | ||
2176 | ret = dev_priv->display.update_plane(crtc, fb, x, y); |
2174 | ret = dev_priv->display.update_plane(crtc, fb, x, y); |
2177 | if (ret) |
- | |
2178 | { |
- | |
2179 | LEAVE(); |
2175 | if (ret) |
2180 | return ret; |
- | |
2181 | }; |
2176 | return ret; |
2182 | 2177 | ||
2183 | intel_update_fbc(dev); |
2178 | intel_update_fbc(dev); |
2184 | intel_increase_pllclock(crtc); |
2179 | intel_increase_pllclock(crtc); |
2185 | LEAVE(); |
- | |
2186 | 2180 | ||
2187 | return 0; |
2181 | return 0; |
2188 | } |
2182 | } |
2189 | 2183 | ||
2190 | static int |
2184 | static int |
2191 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, |
2185 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, |
2192 | struct drm_framebuffer *old_fb) |
2186 | struct drm_framebuffer *old_fb) |
2193 | { |
2187 | { |
2194 | struct drm_device *dev = crtc->dev; |
2188 | struct drm_device *dev = crtc->dev; |
2195 | struct drm_i915_master_private *master_priv; |
2189 | struct drm_i915_master_private *master_priv; |
2196 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2190 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2197 | int ret; |
2191 | int ret; |
2198 | 2192 | ||
2199 | ENTER(); |
2193 | ENTER(); |
2200 | 2194 | ||
2201 | /* no fb bound */ |
2195 | /* no fb bound */ |
2202 | if (!crtc->fb) { |
2196 | if (!crtc->fb) { |
2203 | DRM_ERROR("No FB bound\n"); |
2197 | DRM_ERROR("No FB bound\n"); |
2204 | return 0; |
2198 | return 0; |
2205 | } |
2199 | } |
2206 | 2200 | ||
2207 | switch (intel_crtc->plane) { |
2201 | switch (intel_crtc->plane) { |
2208 | case 0: |
2202 | case 0: |
2209 | case 1: |
2203 | case 1: |
2210 | break; |
2204 | break; |
2211 | case 2: |
2205 | case 2: |
2212 | if (IS_IVYBRIDGE(dev)) |
2206 | if (IS_IVYBRIDGE(dev)) |
2213 | break; |
2207 | break; |
2214 | /* fall through otherwise */ |
2208 | /* fall through otherwise */ |
2215 | default: |
2209 | default: |
2216 | DRM_ERROR("no plane for crtc\n"); |
2210 | DRM_ERROR("no plane for crtc\n"); |
2217 | return -EINVAL; |
2211 | return -EINVAL; |
2218 | } |
2212 | } |
2219 | 2213 | ||
2220 | mutex_lock(&dev->struct_mutex); |
2214 | mutex_lock(&dev->struct_mutex); |
2221 | 2215 | ||
2222 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
2216 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
2223 | LEAVE_ATOMIC_MODE_SET); |
2217 | LEAVE_ATOMIC_MODE_SET); |
2224 | if (ret) { |
2218 | if (ret) { |
2225 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
2219 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
2226 | mutex_unlock(&dev->struct_mutex); |
2220 | mutex_unlock(&dev->struct_mutex); |
2227 | DRM_ERROR("failed to update base address\n"); |
2221 | DRM_ERROR("failed to update base address\n"); |
2228 | LEAVE(); |
2222 | LEAVE(); |
2229 | return ret; |
2223 | return ret; |
2230 | } |
2224 | } |
2231 | 2225 | ||
2232 | mutex_unlock(&dev->struct_mutex); |
2226 | mutex_unlock(&dev->struct_mutex); |
2233 | 2227 | ||
2234 | 2228 | ||
2235 | LEAVE(); |
2229 | LEAVE(); |
2236 | return 0; |
2230 | return 0; |
2237 | - | ||
2238 | #if 0 |
- | |
2239 | if (!dev->primary->master) |
- | |
2240 | { |
- | |
2241 | LEAVE(); |
- | |
2242 | return 0; |
- | |
2243 | }; |
- | |
2244 | - | ||
2245 | master_priv = dev->primary->master->driver_priv; |
- | |
2246 | if (!master_priv->sarea_priv) |
- | |
2247 | { |
- | |
2248 | LEAVE(); |
- | |
2249 | return 0; |
- | |
2250 | }; |
- | |
2251 | - | ||
2252 | if (intel_crtc->pipe) { |
- | |
2253 | master_priv->sarea_priv->pipeB_x = x; |
- | |
2254 | master_priv->sarea_priv->pipeB_y = y; |
- | |
2255 | } else { |
- | |
2256 | master_priv->sarea_priv->pipeA_x = x; |
- | |
2257 | master_priv->sarea_priv->pipeA_y = y; |
- | |
2258 | } |
- | |
2259 | LEAVE(); |
- | |
2260 | - | ||
2261 | return 0; |
- | |
2262 | #endif |
2231 | |
2263 | 2232 | ||
2264 | } |
2233 | } |
2265 | 2234 | ||
2266 | static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) |
2235 | static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) |
2267 | { |
2236 | { |
2268 | struct drm_device *dev = crtc->dev; |
2237 | struct drm_device *dev = crtc->dev; |
2269 | struct drm_i915_private *dev_priv = dev->dev_private; |
2238 | struct drm_i915_private *dev_priv = dev->dev_private; |
2270 | u32 dpa_ctl; |
2239 | u32 dpa_ctl; |
2271 | 2240 | ||
2272 | DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); |
2241 | DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); |
2273 | dpa_ctl = I915_READ(DP_A); |
2242 | dpa_ctl = I915_READ(DP_A); |
2274 | dpa_ctl &= ~DP_PLL_FREQ_MASK; |
2243 | dpa_ctl &= ~DP_PLL_FREQ_MASK; |
2275 | 2244 | ||
2276 | if (clock < 200000) { |
2245 | if (clock < 200000) { |
2277 | u32 temp; |
2246 | u32 temp; |
2278 | dpa_ctl |= DP_PLL_FREQ_160MHZ; |
2247 | dpa_ctl |= DP_PLL_FREQ_160MHZ; |
2279 | /* workaround for 160Mhz: |
2248 | /* workaround for 160Mhz: |
2280 | 1) program 0x4600c bits 15:0 = 0x8124 |
2249 | 1) program 0x4600c bits 15:0 = 0x8124 |
2281 | 2) program 0x46010 bit 0 = 1 |
2250 | 2) program 0x46010 bit 0 = 1 |
2282 | 3) program 0x46034 bit 24 = 1 |
2251 | 3) program 0x46034 bit 24 = 1 |
2283 | 4) program 0x64000 bit 14 = 1 |
2252 | 4) program 0x64000 bit 14 = 1 |
2284 | */ |
2253 | */ |
2285 | temp = I915_READ(0x4600c); |
2254 | temp = I915_READ(0x4600c); |
2286 | temp &= 0xffff0000; |
2255 | temp &= 0xffff0000; |
2287 | I915_WRITE(0x4600c, temp | 0x8124); |
2256 | I915_WRITE(0x4600c, temp | 0x8124); |
2288 | 2257 | ||
2289 | temp = I915_READ(0x46010); |
2258 | temp = I915_READ(0x46010); |
2290 | I915_WRITE(0x46010, temp | 1); |
2259 | I915_WRITE(0x46010, temp | 1); |
2291 | 2260 | ||
2292 | temp = I915_READ(0x46034); |
2261 | temp = I915_READ(0x46034); |
2293 | I915_WRITE(0x46034, temp | (1 << 24)); |
2262 | I915_WRITE(0x46034, temp | (1 << 24)); |
2294 | } else { |
2263 | } else { |
2295 | dpa_ctl |= DP_PLL_FREQ_270MHZ; |
2264 | dpa_ctl |= DP_PLL_FREQ_270MHZ; |
2296 | } |
2265 | } |
2297 | I915_WRITE(DP_A, dpa_ctl); |
2266 | I915_WRITE(DP_A, dpa_ctl); |
2298 | 2267 | ||
2299 | POSTING_READ(DP_A); |
2268 | POSTING_READ(DP_A); |
2300 | udelay(500); |
2269 | udelay(500); |
2301 | } |
2270 | } |
2302 | 2271 | ||
2303 | static void intel_fdi_normal_train(struct drm_crtc *crtc) |
2272 | static void intel_fdi_normal_train(struct drm_crtc *crtc) |
2304 | { |
2273 | { |
2305 | struct drm_device *dev = crtc->dev; |
2274 | struct drm_device *dev = crtc->dev; |
2306 | struct drm_i915_private *dev_priv = dev->dev_private; |
2275 | struct drm_i915_private *dev_priv = dev->dev_private; |
2307 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2276 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2308 | int pipe = intel_crtc->pipe; |
2277 | int pipe = intel_crtc->pipe; |
2309 | u32 reg, temp; |
2278 | u32 reg, temp; |
2310 | 2279 | ||
2311 | /* enable normal train */ |
2280 | /* enable normal train */ |
2312 | reg = FDI_TX_CTL(pipe); |
2281 | reg = FDI_TX_CTL(pipe); |
2313 | temp = I915_READ(reg); |
2282 | temp = I915_READ(reg); |
2314 | if (IS_IVYBRIDGE(dev)) { |
2283 | if (IS_IVYBRIDGE(dev)) { |
2315 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; |
2284 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; |
2316 | temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; |
2285 | temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; |
2317 | } else { |
2286 | } else { |
2318 | temp &= ~FDI_LINK_TRAIN_NONE; |
2287 | temp &= ~FDI_LINK_TRAIN_NONE; |
2319 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; |
2288 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; |
2320 | } |
2289 | } |
2321 | I915_WRITE(reg, temp); |
2290 | I915_WRITE(reg, temp); |
2322 | 2291 | ||
2323 | reg = FDI_RX_CTL(pipe); |
2292 | reg = FDI_RX_CTL(pipe); |
2324 | temp = I915_READ(reg); |
2293 | temp = I915_READ(reg); |
2325 | if (HAS_PCH_CPT(dev)) { |
2294 | if (HAS_PCH_CPT(dev)) { |
2326 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2295 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2327 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; |
2296 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; |
2328 | } else { |
2297 | } else { |
2329 | temp &= ~FDI_LINK_TRAIN_NONE; |
2298 | temp &= ~FDI_LINK_TRAIN_NONE; |
2330 | temp |= FDI_LINK_TRAIN_NONE; |
2299 | temp |= FDI_LINK_TRAIN_NONE; |
2331 | } |
2300 | } |
2332 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); |
2301 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); |
2333 | 2302 | ||
2334 | /* wait one idle pattern time */ |
2303 | /* wait one idle pattern time */ |
2335 | POSTING_READ(reg); |
2304 | POSTING_READ(reg); |
2336 | udelay(1000); |
2305 | udelay(1000); |
2337 | 2306 | ||
2338 | /* IVB wants error correction enabled */ |
2307 | /* IVB wants error correction enabled */ |
2339 | if (IS_IVYBRIDGE(dev)) |
2308 | if (IS_IVYBRIDGE(dev)) |
2340 | I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | |
2309 | I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | |
2341 | FDI_FE_ERRC_ENABLE); |
2310 | FDI_FE_ERRC_ENABLE); |
2342 | } |
2311 | } |
2343 | 2312 | ||
2344 | static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) |
2313 | static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) |
2345 | { |
2314 | { |
2346 | struct drm_i915_private *dev_priv = dev->dev_private; |
2315 | struct drm_i915_private *dev_priv = dev->dev_private; |
2347 | u32 flags = I915_READ(SOUTH_CHICKEN1); |
2316 | u32 flags = I915_READ(SOUTH_CHICKEN1); |
2348 | 2317 | ||
2349 | flags |= FDI_PHASE_SYNC_OVR(pipe); |
2318 | flags |= FDI_PHASE_SYNC_OVR(pipe); |
2350 | I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ |
2319 | I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ |
2351 | flags |= FDI_PHASE_SYNC_EN(pipe); |
2320 | flags |= FDI_PHASE_SYNC_EN(pipe); |
2352 | I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ |
2321 | I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ |
2353 | POSTING_READ(SOUTH_CHICKEN1); |
2322 | POSTING_READ(SOUTH_CHICKEN1); |
2354 | } |
2323 | } |
2355 | 2324 | ||
2356 | /* The FDI link training functions for ILK/Ibexpeak. */ |
2325 | /* The FDI link training functions for ILK/Ibexpeak. */ |
2357 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
2326 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
2358 | { |
2327 | { |
2359 | struct drm_device *dev = crtc->dev; |
2328 | struct drm_device *dev = crtc->dev; |
2360 | struct drm_i915_private *dev_priv = dev->dev_private; |
2329 | struct drm_i915_private *dev_priv = dev->dev_private; |
2361 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2330 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2362 | int pipe = intel_crtc->pipe; |
2331 | int pipe = intel_crtc->pipe; |
2363 | int plane = intel_crtc->plane; |
2332 | int plane = intel_crtc->plane; |
2364 | u32 reg, temp, tries; |
2333 | u32 reg, temp, tries; |
2365 | 2334 | ||
2366 | /* FDI needs bits from pipe & plane first */ |
2335 | /* FDI needs bits from pipe & plane first */ |
2367 | assert_pipe_enabled(dev_priv, pipe); |
2336 | assert_pipe_enabled(dev_priv, pipe); |
2368 | assert_plane_enabled(dev_priv, plane); |
2337 | assert_plane_enabled(dev_priv, plane); |
2369 | 2338 | ||
2370 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2339 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2371 | for train result */ |
2340 | for train result */ |
2372 | reg = FDI_RX_IMR(pipe); |
2341 | reg = FDI_RX_IMR(pipe); |
2373 | temp = I915_READ(reg); |
2342 | temp = I915_READ(reg); |
2374 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2343 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2375 | temp &= ~FDI_RX_BIT_LOCK; |
2344 | temp &= ~FDI_RX_BIT_LOCK; |
2376 | I915_WRITE(reg, temp); |
2345 | I915_WRITE(reg, temp); |
2377 | I915_READ(reg); |
2346 | I915_READ(reg); |
2378 | udelay(150); |
2347 | udelay(150); |
2379 | 2348 | ||
2380 | /* enable CPU FDI TX and PCH FDI RX */ |
2349 | /* enable CPU FDI TX and PCH FDI RX */ |
2381 | reg = FDI_TX_CTL(pipe); |
2350 | reg = FDI_TX_CTL(pipe); |
2382 | temp = I915_READ(reg); |
2351 | temp = I915_READ(reg); |
2383 | temp &= ~(7 << 19); |
2352 | temp &= ~(7 << 19); |
2384 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2353 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2385 | temp &= ~FDI_LINK_TRAIN_NONE; |
2354 | temp &= ~FDI_LINK_TRAIN_NONE; |
2386 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2355 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2387 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2356 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2388 | 2357 | ||
2389 | reg = FDI_RX_CTL(pipe); |
2358 | reg = FDI_RX_CTL(pipe); |
2390 | temp = I915_READ(reg); |
2359 | temp = I915_READ(reg); |
2391 | temp &= ~FDI_LINK_TRAIN_NONE; |
2360 | temp &= ~FDI_LINK_TRAIN_NONE; |
2392 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2361 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2393 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2362 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2394 | 2363 | ||
2395 | POSTING_READ(reg); |
2364 | POSTING_READ(reg); |
2396 | udelay(150); |
2365 | udelay(150); |
2397 | 2366 | ||
2398 | /* Ironlake workaround, enable clock pointer after FDI enable*/ |
2367 | /* Ironlake workaround, enable clock pointer after FDI enable*/ |
2399 | if (HAS_PCH_IBX(dev)) { |
2368 | if (HAS_PCH_IBX(dev)) { |
2400 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); |
2369 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); |
2401 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | |
2370 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | |
2402 | FDI_RX_PHASE_SYNC_POINTER_EN); |
2371 | FDI_RX_PHASE_SYNC_POINTER_EN); |
2403 | } |
2372 | } |
2404 | 2373 | ||
2405 | reg = FDI_RX_IIR(pipe); |
2374 | reg = FDI_RX_IIR(pipe); |
2406 | for (tries = 0; tries < 5; tries++) { |
2375 | for (tries = 0; tries < 5; tries++) { |
2407 | temp = I915_READ(reg); |
2376 | temp = I915_READ(reg); |
2408 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2377 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2409 | 2378 | ||
2410 | if ((temp & FDI_RX_BIT_LOCK)) { |
2379 | if ((temp & FDI_RX_BIT_LOCK)) { |
2411 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
2380 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
2412 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2381 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2413 | break; |
2382 | break; |
2414 | } |
2383 | } |
2415 | } |
2384 | } |
2416 | if (tries == 5) |
2385 | if (tries == 5) |
2417 | DRM_ERROR("FDI train 1 fail!\n"); |
2386 | DRM_ERROR("FDI train 1 fail!\n"); |
2418 | 2387 | ||
2419 | /* Train 2 */ |
2388 | /* Train 2 */ |
2420 | reg = FDI_TX_CTL(pipe); |
2389 | reg = FDI_TX_CTL(pipe); |
2421 | temp = I915_READ(reg); |
2390 | temp = I915_READ(reg); |
2422 | temp &= ~FDI_LINK_TRAIN_NONE; |
2391 | temp &= ~FDI_LINK_TRAIN_NONE; |
2423 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2392 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2424 | I915_WRITE(reg, temp); |
2393 | I915_WRITE(reg, temp); |
2425 | 2394 | ||
2426 | reg = FDI_RX_CTL(pipe); |
2395 | reg = FDI_RX_CTL(pipe); |
2427 | temp = I915_READ(reg); |
2396 | temp = I915_READ(reg); |
2428 | temp &= ~FDI_LINK_TRAIN_NONE; |
2397 | temp &= ~FDI_LINK_TRAIN_NONE; |
2429 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2398 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2430 | I915_WRITE(reg, temp); |
2399 | I915_WRITE(reg, temp); |
2431 | 2400 | ||
2432 | POSTING_READ(reg); |
2401 | POSTING_READ(reg); |
2433 | udelay(150); |
2402 | udelay(150); |
2434 | 2403 | ||
2435 | reg = FDI_RX_IIR(pipe); |
2404 | reg = FDI_RX_IIR(pipe); |
2436 | for (tries = 0; tries < 5; tries++) { |
2405 | for (tries = 0; tries < 5; tries++) { |
2437 | temp = I915_READ(reg); |
2406 | temp = I915_READ(reg); |
2438 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2407 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2439 | 2408 | ||
2440 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2409 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2441 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2410 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2442 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2411 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2443 | break; |
2412 | break; |
2444 | } |
2413 | } |
2445 | } |
2414 | } |
2446 | if (tries == 5) |
2415 | if (tries == 5) |
2447 | DRM_ERROR("FDI train 2 fail!\n"); |
2416 | DRM_ERROR("FDI train 2 fail!\n"); |
2448 | 2417 | ||
2449 | DRM_DEBUG_KMS("FDI train done\n"); |
2418 | DRM_DEBUG_KMS("FDI train done\n"); |
2450 | 2419 | ||
2451 | } |
2420 | } |
2452 | 2421 | ||
2453 | static const int snb_b_fdi_train_param[] = { |
2422 | static const int snb_b_fdi_train_param[] = { |
2454 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, |
2423 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, |
2455 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, |
2424 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, |
2456 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, |
2425 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, |
2457 | FDI_LINK_TRAIN_800MV_0DB_SNB_B, |
2426 | FDI_LINK_TRAIN_800MV_0DB_SNB_B, |
2458 | }; |
2427 | }; |
2459 | 2428 | ||
2460 | /* The FDI link training functions for SNB/Cougarpoint. */ |
2429 | /* The FDI link training functions for SNB/Cougarpoint. */ |
2461 | static void gen6_fdi_link_train(struct drm_crtc *crtc) |
2430 | static void gen6_fdi_link_train(struct drm_crtc *crtc) |
2462 | { |
2431 | { |
2463 | struct drm_device *dev = crtc->dev; |
2432 | struct drm_device *dev = crtc->dev; |
2464 | struct drm_i915_private *dev_priv = dev->dev_private; |
2433 | struct drm_i915_private *dev_priv = dev->dev_private; |
2465 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2434 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2466 | int pipe = intel_crtc->pipe; |
2435 | int pipe = intel_crtc->pipe; |
2467 | u32 reg, temp, i; |
2436 | u32 reg, temp, i; |
2468 | 2437 | ||
2469 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2438 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2470 | for train result */ |
2439 | for train result */ |
2471 | reg = FDI_RX_IMR(pipe); |
2440 | reg = FDI_RX_IMR(pipe); |
2472 | temp = I915_READ(reg); |
2441 | temp = I915_READ(reg); |
2473 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2442 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2474 | temp &= ~FDI_RX_BIT_LOCK; |
2443 | temp &= ~FDI_RX_BIT_LOCK; |
2475 | I915_WRITE(reg, temp); |
2444 | I915_WRITE(reg, temp); |
2476 | 2445 | ||
2477 | POSTING_READ(reg); |
2446 | POSTING_READ(reg); |
2478 | udelay(150); |
2447 | udelay(150); |
2479 | 2448 | ||
2480 | /* enable CPU FDI TX and PCH FDI RX */ |
2449 | /* enable CPU FDI TX and PCH FDI RX */ |
2481 | reg = FDI_TX_CTL(pipe); |
2450 | reg = FDI_TX_CTL(pipe); |
2482 | temp = I915_READ(reg); |
2451 | temp = I915_READ(reg); |
2483 | temp &= ~(7 << 19); |
2452 | temp &= ~(7 << 19); |
2484 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2453 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2485 | temp &= ~FDI_LINK_TRAIN_NONE; |
2454 | temp &= ~FDI_LINK_TRAIN_NONE; |
2486 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2455 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2487 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2456 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2488 | /* SNB-B */ |
2457 | /* SNB-B */ |
2489 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2458 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2490 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2459 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2491 | 2460 | ||
2492 | reg = FDI_RX_CTL(pipe); |
2461 | reg = FDI_RX_CTL(pipe); |
2493 | temp = I915_READ(reg); |
2462 | temp = I915_READ(reg); |
2494 | if (HAS_PCH_CPT(dev)) { |
2463 | if (HAS_PCH_CPT(dev)) { |
2495 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2464 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2496 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2465 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2497 | } else { |
2466 | } else { |
2498 | temp &= ~FDI_LINK_TRAIN_NONE; |
2467 | temp &= ~FDI_LINK_TRAIN_NONE; |
2499 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2468 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2500 | } |
2469 | } |
2501 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2470 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2502 | 2471 | ||
2503 | POSTING_READ(reg); |
2472 | POSTING_READ(reg); |
2504 | udelay(150); |
2473 | udelay(150); |
2505 | 2474 | ||
2506 | if (HAS_PCH_CPT(dev)) |
2475 | if (HAS_PCH_CPT(dev)) |
2507 | cpt_phase_pointer_enable(dev, pipe); |
2476 | cpt_phase_pointer_enable(dev, pipe); |
2508 | 2477 | ||
2509 | for (i = 0; i < 4; i++) { |
2478 | for (i = 0; i < 4; i++) { |
2510 | reg = FDI_TX_CTL(pipe); |
2479 | reg = FDI_TX_CTL(pipe); |
2511 | temp = I915_READ(reg); |
2480 | temp = I915_READ(reg); |
2512 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2481 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2513 | temp |= snb_b_fdi_train_param[i]; |
2482 | temp |= snb_b_fdi_train_param[i]; |
2514 | I915_WRITE(reg, temp); |
2483 | I915_WRITE(reg, temp); |
2515 | 2484 | ||
2516 | POSTING_READ(reg); |
2485 | POSTING_READ(reg); |
2517 | udelay(500); |
2486 | udelay(500); |
2518 | 2487 | ||
2519 | reg = FDI_RX_IIR(pipe); |
2488 | reg = FDI_RX_IIR(pipe); |
2520 | temp = I915_READ(reg); |
2489 | temp = I915_READ(reg); |
2521 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2490 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2522 | 2491 | ||
2523 | if (temp & FDI_RX_BIT_LOCK) { |
2492 | if (temp & FDI_RX_BIT_LOCK) { |
2524 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2493 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2525 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
2494 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
2526 | break; |
2495 | break; |
2527 | } |
2496 | } |
2528 | } |
2497 | } |
2529 | if (i == 4) |
2498 | if (i == 4) |
2530 | DRM_ERROR("FDI train 1 fail!\n"); |
2499 | DRM_ERROR("FDI train 1 fail!\n"); |
2531 | 2500 | ||
2532 | /* Train 2 */ |
2501 | /* Train 2 */ |
2533 | reg = FDI_TX_CTL(pipe); |
2502 | reg = FDI_TX_CTL(pipe); |
2534 | temp = I915_READ(reg); |
2503 | temp = I915_READ(reg); |
2535 | temp &= ~FDI_LINK_TRAIN_NONE; |
2504 | temp &= ~FDI_LINK_TRAIN_NONE; |
2536 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2505 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2537 | if (IS_GEN6(dev)) { |
2506 | if (IS_GEN6(dev)) { |
2538 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2507 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2539 | /* SNB-B */ |
2508 | /* SNB-B */ |
2540 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2509 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2541 | } |
2510 | } |
2542 | I915_WRITE(reg, temp); |
2511 | I915_WRITE(reg, temp); |
2543 | 2512 | ||
2544 | reg = FDI_RX_CTL(pipe); |
2513 | reg = FDI_RX_CTL(pipe); |
2545 | temp = I915_READ(reg); |
2514 | temp = I915_READ(reg); |
2546 | if (HAS_PCH_CPT(dev)) { |
2515 | if (HAS_PCH_CPT(dev)) { |
2547 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2516 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2548 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
2517 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
2549 | } else { |
2518 | } else { |
2550 | temp &= ~FDI_LINK_TRAIN_NONE; |
2519 | temp &= ~FDI_LINK_TRAIN_NONE; |
2551 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2520 | temp |= FDI_LINK_TRAIN_PATTERN_2; |
2552 | } |
2521 | } |
2553 | I915_WRITE(reg, temp); |
2522 | I915_WRITE(reg, temp); |
2554 | 2523 | ||
2555 | POSTING_READ(reg); |
2524 | POSTING_READ(reg); |
2556 | udelay(150); |
2525 | udelay(150); |
2557 | 2526 | ||
2558 | for (i = 0; i < 4; i++) { |
2527 | for (i = 0; i < 4; i++) { |
2559 | reg = FDI_TX_CTL(pipe); |
2528 | reg = FDI_TX_CTL(pipe); |
2560 | temp = I915_READ(reg); |
2529 | temp = I915_READ(reg); |
2561 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2530 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2562 | temp |= snb_b_fdi_train_param[i]; |
2531 | temp |= snb_b_fdi_train_param[i]; |
2563 | I915_WRITE(reg, temp); |
2532 | I915_WRITE(reg, temp); |
2564 | 2533 | ||
2565 | POSTING_READ(reg); |
2534 | POSTING_READ(reg); |
2566 | udelay(500); |
2535 | udelay(500); |
2567 | 2536 | ||
2568 | reg = FDI_RX_IIR(pipe); |
2537 | reg = FDI_RX_IIR(pipe); |
2569 | temp = I915_READ(reg); |
2538 | temp = I915_READ(reg); |
2570 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2539 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2571 | 2540 | ||
2572 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2541 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2573 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2542 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2574 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2543 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2575 | break; |
2544 | break; |
2576 | } |
2545 | } |
2577 | } |
2546 | } |
2578 | if (i == 4) |
2547 | if (i == 4) |
2579 | DRM_ERROR("FDI train 2 fail!\n"); |
2548 | DRM_ERROR("FDI train 2 fail!\n"); |
2580 | 2549 | ||
2581 | DRM_DEBUG_KMS("FDI train done.\n"); |
2550 | DRM_DEBUG_KMS("FDI train done.\n"); |
2582 | } |
2551 | } |
2583 | 2552 | ||
2584 | /* Manual link training for Ivy Bridge A0 parts */ |
2553 | /* Manual link training for Ivy Bridge A0 parts */ |
2585 | static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) |
2554 | static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) |
2586 | { |
2555 | { |
2587 | struct drm_device *dev = crtc->dev; |
2556 | struct drm_device *dev = crtc->dev; |
2588 | struct drm_i915_private *dev_priv = dev->dev_private; |
2557 | struct drm_i915_private *dev_priv = dev->dev_private; |
2589 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2558 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2590 | int pipe = intel_crtc->pipe; |
2559 | int pipe = intel_crtc->pipe; |
2591 | u32 reg, temp, i; |
2560 | u32 reg, temp, i; |
2592 | 2561 | ||
2593 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2562 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2594 | for train result */ |
2563 | for train result */ |
2595 | reg = FDI_RX_IMR(pipe); |
2564 | reg = FDI_RX_IMR(pipe); |
2596 | temp = I915_READ(reg); |
2565 | temp = I915_READ(reg); |
2597 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2566 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2598 | temp &= ~FDI_RX_BIT_LOCK; |
2567 | temp &= ~FDI_RX_BIT_LOCK; |
2599 | I915_WRITE(reg, temp); |
2568 | I915_WRITE(reg, temp); |
2600 | 2569 | ||
2601 | POSTING_READ(reg); |
2570 | POSTING_READ(reg); |
2602 | udelay(150); |
2571 | udelay(150); |
2603 | 2572 | ||
2604 | /* enable CPU FDI TX and PCH FDI RX */ |
2573 | /* enable CPU FDI TX and PCH FDI RX */ |
2605 | reg = FDI_TX_CTL(pipe); |
2574 | reg = FDI_TX_CTL(pipe); |
2606 | temp = I915_READ(reg); |
2575 | temp = I915_READ(reg); |
2607 | temp &= ~(7 << 19); |
2576 | temp &= ~(7 << 19); |
2608 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2577 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2609 | temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); |
2578 | temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); |
2610 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; |
2579 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; |
2611 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2580 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2612 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2581 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2613 | temp |= FDI_COMPOSITE_SYNC; |
2582 | temp |= FDI_COMPOSITE_SYNC; |
2614 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2583 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2615 | 2584 | ||
2616 | reg = FDI_RX_CTL(pipe); |
2585 | reg = FDI_RX_CTL(pipe); |
2617 | temp = I915_READ(reg); |
2586 | temp = I915_READ(reg); |
2618 | temp &= ~FDI_LINK_TRAIN_AUTO; |
2587 | temp &= ~FDI_LINK_TRAIN_AUTO; |
2619 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2588 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2620 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2589 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2621 | temp |= FDI_COMPOSITE_SYNC; |
2590 | temp |= FDI_COMPOSITE_SYNC; |
2622 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2591 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2623 | 2592 | ||
2624 | POSTING_READ(reg); |
2593 | POSTING_READ(reg); |
2625 | udelay(150); |
2594 | udelay(150); |
2626 | 2595 | ||
2627 | if (HAS_PCH_CPT(dev)) |
2596 | if (HAS_PCH_CPT(dev)) |
2628 | cpt_phase_pointer_enable(dev, pipe); |
2597 | cpt_phase_pointer_enable(dev, pipe); |
2629 | 2598 | ||
2630 | for (i = 0; i < 4; i++) { |
2599 | for (i = 0; i < 4; i++) { |
2631 | reg = FDI_TX_CTL(pipe); |
2600 | reg = FDI_TX_CTL(pipe); |
2632 | temp = I915_READ(reg); |
2601 | temp = I915_READ(reg); |
2633 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2602 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2634 | temp |= snb_b_fdi_train_param[i]; |
2603 | temp |= snb_b_fdi_train_param[i]; |
2635 | I915_WRITE(reg, temp); |
2604 | I915_WRITE(reg, temp); |
2636 | 2605 | ||
2637 | POSTING_READ(reg); |
2606 | POSTING_READ(reg); |
2638 | udelay(500); |
2607 | udelay(500); |
2639 | 2608 | ||
2640 | reg = FDI_RX_IIR(pipe); |
2609 | reg = FDI_RX_IIR(pipe); |
2641 | temp = I915_READ(reg); |
2610 | temp = I915_READ(reg); |
2642 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2611 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2643 | 2612 | ||
2644 | if (temp & FDI_RX_BIT_LOCK || |
2613 | if (temp & FDI_RX_BIT_LOCK || |
2645 | (I915_READ(reg) & FDI_RX_BIT_LOCK)) { |
2614 | (I915_READ(reg) & FDI_RX_BIT_LOCK)) { |
2646 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2615 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
2647 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
2616 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
2648 | break; |
2617 | break; |
2649 | } |
2618 | } |
2650 | } |
2619 | } |
2651 | if (i == 4) |
2620 | if (i == 4) |
2652 | DRM_ERROR("FDI train 1 fail!\n"); |
2621 | DRM_ERROR("FDI train 1 fail!\n"); |
2653 | 2622 | ||
2654 | /* Train 2 */ |
2623 | /* Train 2 */ |
2655 | reg = FDI_TX_CTL(pipe); |
2624 | reg = FDI_TX_CTL(pipe); |
2656 | temp = I915_READ(reg); |
2625 | temp = I915_READ(reg); |
2657 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; |
2626 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; |
2658 | temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; |
2627 | temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; |
2659 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2628 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2660 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2629 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2661 | I915_WRITE(reg, temp); |
2630 | I915_WRITE(reg, temp); |
2662 | 2631 | ||
2663 | reg = FDI_RX_CTL(pipe); |
2632 | reg = FDI_RX_CTL(pipe); |
2664 | temp = I915_READ(reg); |
2633 | temp = I915_READ(reg); |
2665 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2634 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2666 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
2635 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; |
2667 | I915_WRITE(reg, temp); |
2636 | I915_WRITE(reg, temp); |
2668 | 2637 | ||
2669 | POSTING_READ(reg); |
2638 | POSTING_READ(reg); |
2670 | udelay(150); |
2639 | udelay(150); |
2671 | 2640 | ||
2672 | for (i = 0; i < 4; i++) { |
2641 | for (i = 0; i < 4; i++) { |
2673 | reg = FDI_TX_CTL(pipe); |
2642 | reg = FDI_TX_CTL(pipe); |
2674 | temp = I915_READ(reg); |
2643 | temp = I915_READ(reg); |
2675 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2644 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2676 | temp |= snb_b_fdi_train_param[i]; |
2645 | temp |= snb_b_fdi_train_param[i]; |
2677 | I915_WRITE(reg, temp); |
2646 | I915_WRITE(reg, temp); |
2678 | 2647 | ||
2679 | POSTING_READ(reg); |
2648 | POSTING_READ(reg); |
2680 | udelay(500); |
2649 | udelay(500); |
2681 | 2650 | ||
2682 | reg = FDI_RX_IIR(pipe); |
2651 | reg = FDI_RX_IIR(pipe); |
2683 | temp = I915_READ(reg); |
2652 | temp = I915_READ(reg); |
2684 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2653 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2685 | 2654 | ||
2686 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2655 | if (temp & FDI_RX_SYMBOL_LOCK) { |
2687 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2656 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
2688 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2657 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2689 | break; |
2658 | break; |
2690 | } |
2659 | } |
2691 | } |
2660 | } |
2692 | if (i == 4) |
2661 | if (i == 4) |
2693 | DRM_ERROR("FDI train 2 fail!\n"); |
2662 | DRM_ERROR("FDI train 2 fail!\n"); |
2694 | 2663 | ||
2695 | DRM_DEBUG_KMS("FDI train done.\n"); |
2664 | DRM_DEBUG_KMS("FDI train done.\n"); |
2696 | } |
2665 | } |
2697 | 2666 | ||
2698 | static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) |
2667 | static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) |
2699 | { |
2668 | { |
2700 | struct drm_device *dev = crtc->dev; |
2669 | struct drm_device *dev = crtc->dev; |
2701 | struct drm_i915_private *dev_priv = dev->dev_private; |
2670 | struct drm_i915_private *dev_priv = dev->dev_private; |
2702 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2671 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2703 | int pipe = intel_crtc->pipe; |
2672 | int pipe = intel_crtc->pipe; |
2704 | u32 reg, temp; |
2673 | u32 reg, temp; |
2705 | 2674 | ||
2706 | /* Write the TU size bits so error detection works */ |
2675 | /* Write the TU size bits so error detection works */ |
2707 | I915_WRITE(FDI_RX_TUSIZE1(pipe), |
2676 | I915_WRITE(FDI_RX_TUSIZE1(pipe), |
2708 | I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); |
2677 | I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); |
2709 | 2678 | ||
2710 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
2679 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
2711 | reg = FDI_RX_CTL(pipe); |
2680 | reg = FDI_RX_CTL(pipe); |
2712 | temp = I915_READ(reg); |
2681 | temp = I915_READ(reg); |
2713 | temp &= ~((0x7 << 19) | (0x7 << 16)); |
2682 | temp &= ~((0x7 << 19) | (0x7 << 16)); |
2714 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2683 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
2715 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
2684 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
2716 | I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); |
2685 | I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); |
2717 | 2686 | ||
2718 | POSTING_READ(reg); |
2687 | POSTING_READ(reg); |
2719 | udelay(200); |
2688 | udelay(200); |
2720 | 2689 | ||
2721 | /* Switch from Rawclk to PCDclk */ |
2690 | /* Switch from Rawclk to PCDclk */ |
2722 | temp = I915_READ(reg); |
2691 | temp = I915_READ(reg); |
2723 | I915_WRITE(reg, temp | FDI_PCDCLK); |
2692 | I915_WRITE(reg, temp | FDI_PCDCLK); |
2724 | 2693 | ||
2725 | POSTING_READ(reg); |
2694 | POSTING_READ(reg); |
2726 | udelay(200); |
2695 | udelay(200); |
2727 | 2696 | ||
2728 | /* Enable CPU FDI TX PLL, always on for Ironlake */ |
2697 | /* Enable CPU FDI TX PLL, always on for Ironlake */ |
2729 | reg = FDI_TX_CTL(pipe); |
2698 | reg = FDI_TX_CTL(pipe); |
2730 | temp = I915_READ(reg); |
2699 | temp = I915_READ(reg); |
2731 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { |
2700 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { |
2732 | I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); |
2701 | I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); |
2733 | 2702 | ||
2734 | POSTING_READ(reg); |
2703 | POSTING_READ(reg); |
2735 | udelay(100); |
2704 | udelay(100); |
2736 | } |
2705 | } |
2737 | } |
2706 | } |
2738 | 2707 | ||
2739 | static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) |
2708 | static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) |
2740 | { |
2709 | { |
2741 | struct drm_i915_private *dev_priv = dev->dev_private; |
2710 | struct drm_i915_private *dev_priv = dev->dev_private; |
2742 | u32 flags = I915_READ(SOUTH_CHICKEN1); |
2711 | u32 flags = I915_READ(SOUTH_CHICKEN1); |
2743 | 2712 | ||
2744 | flags &= ~(FDI_PHASE_SYNC_EN(pipe)); |
2713 | flags &= ~(FDI_PHASE_SYNC_EN(pipe)); |
2745 | I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */ |
2714 | I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */ |
2746 | flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); |
2715 | flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); |
2747 | I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ |
2716 | I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ |
2748 | POSTING_READ(SOUTH_CHICKEN1); |
2717 | POSTING_READ(SOUTH_CHICKEN1); |
2749 | } |
2718 | } |
2750 | static void ironlake_fdi_disable(struct drm_crtc *crtc) |
2719 | static void ironlake_fdi_disable(struct drm_crtc *crtc) |
2751 | { |
2720 | { |
2752 | struct drm_device *dev = crtc->dev; |
2721 | struct drm_device *dev = crtc->dev; |
2753 | struct drm_i915_private *dev_priv = dev->dev_private; |
2722 | struct drm_i915_private *dev_priv = dev->dev_private; |
2754 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2723 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2755 | int pipe = intel_crtc->pipe; |
2724 | int pipe = intel_crtc->pipe; |
2756 | u32 reg, temp; |
2725 | u32 reg, temp; |
2757 | 2726 | ||
2758 | /* disable CPU FDI tx and PCH FDI rx */ |
2727 | /* disable CPU FDI tx and PCH FDI rx */ |
2759 | reg = FDI_TX_CTL(pipe); |
2728 | reg = FDI_TX_CTL(pipe); |
2760 | temp = I915_READ(reg); |
2729 | temp = I915_READ(reg); |
2761 | I915_WRITE(reg, temp & ~FDI_TX_ENABLE); |
2730 | I915_WRITE(reg, temp & ~FDI_TX_ENABLE); |
2762 | POSTING_READ(reg); |
2731 | POSTING_READ(reg); |
2763 | 2732 | ||
2764 | reg = FDI_RX_CTL(pipe); |
2733 | reg = FDI_RX_CTL(pipe); |
2765 | temp = I915_READ(reg); |
2734 | temp = I915_READ(reg); |
2766 | temp &= ~(0x7 << 16); |
2735 | temp &= ~(0x7 << 16); |
2767 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
2736 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
2768 | I915_WRITE(reg, temp & ~FDI_RX_ENABLE); |
2737 | I915_WRITE(reg, temp & ~FDI_RX_ENABLE); |
2769 | 2738 | ||
2770 | POSTING_READ(reg); |
2739 | POSTING_READ(reg); |
2771 | udelay(100); |
2740 | udelay(100); |
2772 | 2741 | ||
2773 | /* Ironlake workaround, disable clock pointer after downing FDI */ |
2742 | /* Ironlake workaround, disable clock pointer after downing FDI */ |
2774 | if (HAS_PCH_IBX(dev)) { |
2743 | if (HAS_PCH_IBX(dev)) { |
2775 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); |
2744 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); |
2776 | I915_WRITE(FDI_RX_CHICKEN(pipe), |
2745 | I915_WRITE(FDI_RX_CHICKEN(pipe), |
2777 | I915_READ(FDI_RX_CHICKEN(pipe) & |
2746 | I915_READ(FDI_RX_CHICKEN(pipe) & |
2778 | ~FDI_RX_PHASE_SYNC_POINTER_EN)); |
2747 | ~FDI_RX_PHASE_SYNC_POINTER_EN)); |
2779 | } else if (HAS_PCH_CPT(dev)) { |
2748 | } else if (HAS_PCH_CPT(dev)) { |
2780 | cpt_phase_pointer_disable(dev, pipe); |
2749 | cpt_phase_pointer_disable(dev, pipe); |
2781 | } |
2750 | } |
2782 | 2751 | ||
2783 | /* still set train pattern 1 */ |
2752 | /* still set train pattern 1 */ |
2784 | reg = FDI_TX_CTL(pipe); |
2753 | reg = FDI_TX_CTL(pipe); |
2785 | temp = I915_READ(reg); |
2754 | temp = I915_READ(reg); |
2786 | temp &= ~FDI_LINK_TRAIN_NONE; |
2755 | temp &= ~FDI_LINK_TRAIN_NONE; |
2787 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2756 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2788 | I915_WRITE(reg, temp); |
2757 | I915_WRITE(reg, temp); |
2789 | 2758 | ||
2790 | reg = FDI_RX_CTL(pipe); |
2759 | reg = FDI_RX_CTL(pipe); |
2791 | temp = I915_READ(reg); |
2760 | temp = I915_READ(reg); |
2792 | if (HAS_PCH_CPT(dev)) { |
2761 | if (HAS_PCH_CPT(dev)) { |
2793 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2762 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2794 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2763 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2795 | } else { |
2764 | } else { |
2796 | temp &= ~FDI_LINK_TRAIN_NONE; |
2765 | temp &= ~FDI_LINK_TRAIN_NONE; |
2797 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2766 | temp |= FDI_LINK_TRAIN_PATTERN_1; |
2798 | } |
2767 | } |
2799 | /* BPC in FDI rx is consistent with that in PIPECONF */ |
2768 | /* BPC in FDI rx is consistent with that in PIPECONF */ |
2800 | temp &= ~(0x07 << 16); |
2769 | temp &= ~(0x07 << 16); |
2801 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
2770 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
2802 | I915_WRITE(reg, temp); |
2771 | I915_WRITE(reg, temp); |
2803 | 2772 | ||
2804 | POSTING_READ(reg); |
2773 | POSTING_READ(reg); |
2805 | udelay(100); |
2774 | udelay(100); |
2806 | } |
2775 | } |
2807 | 2776 | ||
2808 | /* |
2777 | /* |
2809 | * When we disable a pipe, we need to clear any pending scanline wait events |
2778 | * When we disable a pipe, we need to clear any pending scanline wait events |
2810 | * to avoid hanging the ring, which we assume we are waiting on. |
2779 | * to avoid hanging the ring, which we assume we are waiting on. |
2811 | */ |
2780 | */ |
2812 | static void intel_clear_scanline_wait(struct drm_device *dev) |
2781 | static void intel_clear_scanline_wait(struct drm_device *dev) |
2813 | { |
2782 | { |
2814 | struct drm_i915_private *dev_priv = dev->dev_private; |
2783 | struct drm_i915_private *dev_priv = dev->dev_private; |
2815 | struct intel_ring_buffer *ring; |
2784 | struct intel_ring_buffer *ring; |
2816 | u32 tmp; |
2785 | u32 tmp; |
2817 | 2786 | ||
2818 | if (IS_GEN2(dev)) |
2787 | if (IS_GEN2(dev)) |
2819 | /* Can't break the hang on i8xx */ |
2788 | /* Can't break the hang on i8xx */ |
2820 | return; |
2789 | return; |
2821 | 2790 | ||
2822 | ring = LP_RING(dev_priv); |
2791 | ring = LP_RING(dev_priv); |
2823 | tmp = I915_READ_CTL(ring); |
2792 | tmp = I915_READ_CTL(ring); |
2824 | if (tmp & RING_WAIT) |
2793 | if (tmp & RING_WAIT) |
2825 | I915_WRITE_CTL(ring, tmp); |
2794 | I915_WRITE_CTL(ring, tmp); |
2826 | } |
2795 | } |
2827 | 2796 | ||
2828 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
2797 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
2829 | { |
2798 | { |
2830 | struct drm_i915_gem_object *obj; |
2799 | struct drm_i915_gem_object *obj; |
2831 | struct drm_i915_private *dev_priv; |
2800 | struct drm_i915_private *dev_priv; |
2832 | 2801 | ||
2833 | if (crtc->fb == NULL) |
2802 | if (crtc->fb == NULL) |
2834 | return; |
2803 | return; |
2835 | 2804 | ||
2836 | obj = to_intel_framebuffer(crtc->fb)->obj; |
2805 | obj = to_intel_framebuffer(crtc->fb)->obj; |
2837 | dev_priv = crtc->dev->dev_private; |
2806 | dev_priv = crtc->dev->dev_private; |
2838 | // wait_event(dev_priv->pending_flip_queue, |
2807 | wait_event(dev_priv->pending_flip_queue, |
2839 | // atomic_read(&obj->pending_flip) == 0); |
2808 | atomic_read(&obj->pending_flip) == 0); |
2840 | } |
2809 | } |
2841 | 2810 | ||
2842 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) |
2811 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) |
2843 | { |
2812 | { |
2844 | struct drm_device *dev = crtc->dev; |
2813 | struct drm_device *dev = crtc->dev; |
2845 | struct drm_mode_config *mode_config = &dev->mode_config; |
2814 | struct drm_mode_config *mode_config = &dev->mode_config; |
2846 | struct intel_encoder *encoder; |
2815 | struct intel_encoder *encoder; |
2847 | 2816 | ||
2848 | /* |
2817 | /* |
2849 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that |
2818 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that |
2850 | * must be driven by its own crtc; no sharing is possible. |
2819 | * must be driven by its own crtc; no sharing is possible. |
2851 | */ |
2820 | */ |
2852 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
2821 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
2853 | if (encoder->base.crtc != crtc) |
2822 | if (encoder->base.crtc != crtc) |
2854 | continue; |
2823 | continue; |
2855 | 2824 | ||
2856 | switch (encoder->type) { |
2825 | switch (encoder->type) { |
2857 | case INTEL_OUTPUT_EDP: |
2826 | case INTEL_OUTPUT_EDP: |
2858 | if (!intel_encoder_is_pch_edp(&encoder->base)) |
2827 | if (!intel_encoder_is_pch_edp(&encoder->base)) |
2859 | return false; |
2828 | return false; |
2860 | continue; |
2829 | continue; |
2861 | } |
2830 | } |
2862 | } |
2831 | } |
2863 | 2832 | ||
2864 | return true; |
2833 | return true; |
2865 | } |
2834 | } |
2866 | 2835 | ||
2867 | /* |
2836 | /* |
2868 | * Enable PCH resources required for PCH ports: |
2837 | * Enable PCH resources required for PCH ports: |
2869 | * - PCH PLLs |
2838 | * - PCH PLLs |
2870 | * - FDI training & RX/TX |
2839 | * - FDI training & RX/TX |
2871 | * - update transcoder timings |
2840 | * - update transcoder timings |
2872 | * - DP transcoding bits |
2841 | * - DP transcoding bits |
2873 | * - transcoder |
2842 | * - transcoder |
2874 | */ |
2843 | */ |
2875 | static void ironlake_pch_enable(struct drm_crtc *crtc) |
2844 | static void ironlake_pch_enable(struct drm_crtc *crtc) |
2876 | { |
2845 | { |
2877 | struct drm_device *dev = crtc->dev; |
2846 | struct drm_device *dev = crtc->dev; |
2878 | struct drm_i915_private *dev_priv = dev->dev_private; |
2847 | struct drm_i915_private *dev_priv = dev->dev_private; |
2879 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2848 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2880 | int pipe = intel_crtc->pipe; |
2849 | int pipe = intel_crtc->pipe; |
2881 | u32 reg, temp, transc_sel; |
2850 | u32 reg, temp, transc_sel; |
2882 | 2851 | ||
2883 | /* For PCH output, training FDI link */ |
2852 | /* For PCH output, training FDI link */ |
2884 | dev_priv->display.fdi_link_train(crtc); |
2853 | dev_priv->display.fdi_link_train(crtc); |
2885 | 2854 | ||
2886 | intel_enable_pch_pll(dev_priv, pipe); |
2855 | intel_enable_pch_pll(dev_priv, pipe); |
2887 | 2856 | ||
2888 | if (HAS_PCH_CPT(dev)) { |
2857 | if (HAS_PCH_CPT(dev)) { |
2889 | transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL : |
2858 | transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL : |
2890 | TRANSC_DPLLB_SEL; |
2859 | TRANSC_DPLLB_SEL; |
2891 | 2860 | ||
2892 | /* Be sure PCH DPLL SEL is set */ |
2861 | /* Be sure PCH DPLL SEL is set */ |
2893 | temp = I915_READ(PCH_DPLL_SEL); |
2862 | temp = I915_READ(PCH_DPLL_SEL); |
2894 | if (pipe == 0) { |
2863 | if (pipe == 0) { |
2895 | temp &= ~(TRANSA_DPLLB_SEL); |
2864 | temp &= ~(TRANSA_DPLLB_SEL); |
2896 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); |
2865 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); |
2897 | } else if (pipe == 1) { |
2866 | } else if (pipe == 1) { |
2898 | temp &= ~(TRANSB_DPLLB_SEL); |
2867 | temp &= ~(TRANSB_DPLLB_SEL); |
2899 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
2868 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
2900 | } else if (pipe == 2) { |
2869 | } else if (pipe == 2) { |
2901 | temp &= ~(TRANSC_DPLLB_SEL); |
2870 | temp &= ~(TRANSC_DPLLB_SEL); |
2902 | temp |= (TRANSC_DPLL_ENABLE | transc_sel); |
2871 | temp |= (TRANSC_DPLL_ENABLE | transc_sel); |
2903 | } |
2872 | } |
2904 | I915_WRITE(PCH_DPLL_SEL, temp); |
2873 | I915_WRITE(PCH_DPLL_SEL, temp); |
2905 | } |
2874 | } |
2906 | 2875 | ||
2907 | /* set transcoder timing, panel must allow it */ |
2876 | /* set transcoder timing, panel must allow it */ |
2908 | assert_panel_unlocked(dev_priv, pipe); |
2877 | assert_panel_unlocked(dev_priv, pipe); |
2909 | I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); |
2878 | I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); |
2910 | I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); |
2879 | I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); |
2911 | I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); |
2880 | I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); |
2912 | 2881 | ||
2913 | I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); |
2882 | I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); |
2914 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); |
2883 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); |
2915 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); |
2884 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); |
2916 | 2885 | ||
2917 | intel_fdi_normal_train(crtc); |
2886 | intel_fdi_normal_train(crtc); |
2918 | 2887 | ||
2919 | /* For PCH DP, enable TRANS_DP_CTL */ |
2888 | /* For PCH DP, enable TRANS_DP_CTL */ |
2920 | if (HAS_PCH_CPT(dev) && |
2889 | if (HAS_PCH_CPT(dev) && |
2921 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || |
2890 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || |
2922 | intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { |
2891 | intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { |
2923 | u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; |
2892 | u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; |
2924 | reg = TRANS_DP_CTL(pipe); |
2893 | reg = TRANS_DP_CTL(pipe); |
2925 | temp = I915_READ(reg); |
2894 | temp = I915_READ(reg); |
2926 | temp &= ~(TRANS_DP_PORT_SEL_MASK | |
2895 | temp &= ~(TRANS_DP_PORT_SEL_MASK | |
2927 | TRANS_DP_SYNC_MASK | |
2896 | TRANS_DP_SYNC_MASK | |
2928 | TRANS_DP_BPC_MASK); |
2897 | TRANS_DP_BPC_MASK); |
2929 | temp |= (TRANS_DP_OUTPUT_ENABLE | |
2898 | temp |= (TRANS_DP_OUTPUT_ENABLE | |
2930 | TRANS_DP_ENH_FRAMING); |
2899 | TRANS_DP_ENH_FRAMING); |
2931 | temp |= bpc << 9; /* same format but at 11:9 */ |
2900 | temp |= bpc << 9; /* same format but at 11:9 */ |
2932 | 2901 | ||
2933 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) |
2902 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) |
2934 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; |
2903 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; |
2935 | if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) |
2904 | if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) |
2936 | temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; |
2905 | temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; |
2937 | 2906 | ||
2938 | switch (intel_trans_dp_port_sel(crtc)) { |
2907 | switch (intel_trans_dp_port_sel(crtc)) { |
2939 | case PCH_DP_B: |
2908 | case PCH_DP_B: |
2940 | temp |= TRANS_DP_PORT_SEL_B; |
2909 | temp |= TRANS_DP_PORT_SEL_B; |
2941 | break; |
2910 | break; |
2942 | case PCH_DP_C: |
2911 | case PCH_DP_C: |
2943 | temp |= TRANS_DP_PORT_SEL_C; |
2912 | temp |= TRANS_DP_PORT_SEL_C; |
2944 | break; |
2913 | break; |
2945 | case PCH_DP_D: |
2914 | case PCH_DP_D: |
2946 | temp |= TRANS_DP_PORT_SEL_D; |
2915 | temp |= TRANS_DP_PORT_SEL_D; |
2947 | break; |
2916 | break; |
2948 | default: |
2917 | default: |
2949 | DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); |
2918 | DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); |
2950 | temp |= TRANS_DP_PORT_SEL_B; |
2919 | temp |= TRANS_DP_PORT_SEL_B; |
2951 | break; |
2920 | break; |
2952 | } |
2921 | } |
2953 | 2922 | ||
2954 | I915_WRITE(reg, temp); |
2923 | I915_WRITE(reg, temp); |
2955 | } |
2924 | } |
2956 | 2925 | ||
2957 | intel_enable_transcoder(dev_priv, pipe); |
2926 | intel_enable_transcoder(dev_priv, pipe); |
2958 | } |
2927 | } |
2959 | 2928 | ||
2960 | void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) |
2929 | void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) |
2961 | { |
2930 | { |
2962 | struct drm_i915_private *dev_priv = dev->dev_private; |
2931 | struct drm_i915_private *dev_priv = dev->dev_private; |
2963 | int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe); |
2932 | int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe); |
2964 | u32 temp; |
2933 | u32 temp; |
2965 | 2934 | ||
2966 | temp = I915_READ(dslreg); |
2935 | temp = I915_READ(dslreg); |
2967 | udelay(500); |
2936 | udelay(500); |
2968 | if (wait_for(I915_READ(dslreg) != temp, 5)) { |
2937 | if (wait_for(I915_READ(dslreg) != temp, 5)) { |
2969 | /* Without this, mode sets may fail silently on FDI */ |
2938 | /* Without this, mode sets may fail silently on FDI */ |
2970 | I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS); |
2939 | I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS); |
2971 | udelay(250); |
2940 | udelay(250); |
2972 | I915_WRITE(tc2reg, 0); |
2941 | I915_WRITE(tc2reg, 0); |
2973 | if (wait_for(I915_READ(dslreg) != temp, 5)) |
2942 | if (wait_for(I915_READ(dslreg) != temp, 5)) |
2974 | DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); |
2943 | DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); |
2975 | } |
2944 | } |
2976 | } |
2945 | } |
2977 | 2946 | ||
2978 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
2947 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
2979 | { |
2948 | { |
2980 | struct drm_device *dev = crtc->dev; |
2949 | struct drm_device *dev = crtc->dev; |
2981 | struct drm_i915_private *dev_priv = dev->dev_private; |
2950 | struct drm_i915_private *dev_priv = dev->dev_private; |
2982 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2951 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2983 | int pipe = intel_crtc->pipe; |
2952 | int pipe = intel_crtc->pipe; |
2984 | int plane = intel_crtc->plane; |
2953 | int plane = intel_crtc->plane; |
2985 | u32 temp; |
2954 | u32 temp; |
2986 | bool is_pch_port; |
2955 | bool is_pch_port; |
2987 | 2956 | ||
2988 | if (intel_crtc->active) |
2957 | if (intel_crtc->active) |
2989 | return; |
2958 | return; |
2990 | 2959 | ||
2991 | intel_crtc->active = true; |
2960 | intel_crtc->active = true; |
2992 | intel_update_watermarks(dev); |
2961 | intel_update_watermarks(dev); |
2993 | 2962 | ||
2994 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
2963 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
2995 | temp = I915_READ(PCH_LVDS); |
2964 | temp = I915_READ(PCH_LVDS); |
2996 | if ((temp & LVDS_PORT_EN) == 0) |
2965 | if ((temp & LVDS_PORT_EN) == 0) |
2997 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); |
2966 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); |
2998 | } |
2967 | } |
2999 | 2968 | ||
3000 | is_pch_port = intel_crtc_driving_pch(crtc); |
2969 | is_pch_port = intel_crtc_driving_pch(crtc); |
3001 | 2970 | ||
3002 | if (is_pch_port) |
2971 | if (is_pch_port) |
3003 | ironlake_fdi_pll_enable(crtc); |
2972 | ironlake_fdi_pll_enable(crtc); |
3004 | else |
2973 | else |
3005 | ironlake_fdi_disable(crtc); |
2974 | ironlake_fdi_disable(crtc); |
3006 | 2975 | ||
3007 | /* Enable panel fitting for LVDS */ |
2976 | /* Enable panel fitting for LVDS */ |
3008 | if (dev_priv->pch_pf_size && |
2977 | if (dev_priv->pch_pf_size && |
3009 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { |
2978 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { |
3010 | /* Force use of hard-coded filter coefficients |
2979 | /* Force use of hard-coded filter coefficients |
3011 | * as some pre-programmed values are broken, |
2980 | * as some pre-programmed values are broken, |
3012 | * e.g. x201. |
2981 | * e.g. x201. |
3013 | */ |
2982 | */ |
3014 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); |
2983 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); |
3015 | I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); |
2984 | I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); |
3016 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); |
2985 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); |
3017 | } |
2986 | } |
3018 | 2987 | ||
3019 | /* |
2988 | /* |
3020 | * On ILK+ LUT must be loaded before the pipe is running but with |
2989 | * On ILK+ LUT must be loaded before the pipe is running but with |
3021 | * clocks enabled |
2990 | * clocks enabled |
3022 | */ |
2991 | */ |
3023 | intel_crtc_load_lut(crtc); |
2992 | intel_crtc_load_lut(crtc); |
3024 | 2993 | ||
3025 | intel_enable_pipe(dev_priv, pipe, is_pch_port); |
2994 | intel_enable_pipe(dev_priv, pipe, is_pch_port); |
3026 | intel_enable_plane(dev_priv, plane, pipe); |
2995 | intel_enable_plane(dev_priv, plane, pipe); |
3027 | 2996 | ||
3028 | if (is_pch_port) |
2997 | if (is_pch_port) |
3029 | ironlake_pch_enable(crtc); |
2998 | ironlake_pch_enable(crtc); |
3030 | 2999 | ||
3031 | mutex_lock(&dev->struct_mutex); |
3000 | mutex_lock(&dev->struct_mutex); |
3032 | intel_update_fbc(dev); |
3001 | intel_update_fbc(dev); |
3033 | mutex_unlock(&dev->struct_mutex); |
3002 | mutex_unlock(&dev->struct_mutex); |
3034 | 3003 | ||
3035 | // intel_crtc_update_cursor(crtc, true); |
3004 | // intel_crtc_update_cursor(crtc, true); |
3036 | } |
3005 | } |
3037 | 3006 | ||
3038 | static void ironlake_crtc_disable(struct drm_crtc *crtc) |
3007 | static void ironlake_crtc_disable(struct drm_crtc *crtc) |
3039 | { |
3008 | { |
3040 | struct drm_device *dev = crtc->dev; |
3009 | struct drm_device *dev = crtc->dev; |
3041 | struct drm_i915_private *dev_priv = dev->dev_private; |
3010 | struct drm_i915_private *dev_priv = dev->dev_private; |
3042 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3011 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3043 | int pipe = intel_crtc->pipe; |
3012 | int pipe = intel_crtc->pipe; |
3044 | int plane = intel_crtc->plane; |
3013 | int plane = intel_crtc->plane; |
3045 | u32 reg, temp; |
3014 | u32 reg, temp; |
3046 | 3015 | ||
3047 | if (!intel_crtc->active) |
3016 | if (!intel_crtc->active) |
3048 | return; |
3017 | return; |
3049 | 3018 | ||
3050 | ENTER(); |
3019 | ENTER(); |
3051 | 3020 | ||
3052 | intel_crtc_wait_for_pending_flips(crtc); |
3021 | intel_crtc_wait_for_pending_flips(crtc); |
3053 | // drm_vblank_off(dev, pipe); |
3022 | // drm_vblank_off(dev, pipe); |
3054 | // intel_crtc_update_cursor(crtc, false); |
3023 | // intel_crtc_update_cursor(crtc, false); |
3055 | 3024 | ||
3056 | intel_disable_plane(dev_priv, plane, pipe); |
3025 | intel_disable_plane(dev_priv, plane, pipe); |
3057 | 3026 | ||
3058 | if (dev_priv->cfb_plane == plane) |
3027 | if (dev_priv->cfb_plane == plane) |
3059 | intel_disable_fbc(dev); |
3028 | intel_disable_fbc(dev); |
3060 | 3029 | ||
3061 | intel_disable_pipe(dev_priv, pipe); |
3030 | intel_disable_pipe(dev_priv, pipe); |
3062 | 3031 | ||
3063 | /* Disable PF */ |
3032 | /* Disable PF */ |
3064 | I915_WRITE(PF_CTL(pipe), 0); |
3033 | I915_WRITE(PF_CTL(pipe), 0); |
3065 | I915_WRITE(PF_WIN_SZ(pipe), 0); |
3034 | I915_WRITE(PF_WIN_SZ(pipe), 0); |
3066 | 3035 | ||
3067 | ironlake_fdi_disable(crtc); |
3036 | ironlake_fdi_disable(crtc); |
3068 | 3037 | ||
3069 | /* This is a horrible layering violation; we should be doing this in |
3038 | /* This is a horrible layering violation; we should be doing this in |
3070 | * the connector/encoder ->prepare instead, but we don't always have |
3039 | * the connector/encoder ->prepare instead, but we don't always have |
3071 | * enough information there about the config to know whether it will |
3040 | * enough information there about the config to know whether it will |
3072 | * actually be necessary or just cause undesired flicker. |
3041 | * actually be necessary or just cause undesired flicker. |
3073 | */ |
3042 | */ |
3074 | intel_disable_pch_ports(dev_priv, pipe); |
3043 | intel_disable_pch_ports(dev_priv, pipe); |
3075 | 3044 | ||
3076 | intel_disable_transcoder(dev_priv, pipe); |
3045 | intel_disable_transcoder(dev_priv, pipe); |
3077 | 3046 | ||
3078 | if (HAS_PCH_CPT(dev)) { |
3047 | if (HAS_PCH_CPT(dev)) { |
3079 | /* disable TRANS_DP_CTL */ |
3048 | /* disable TRANS_DP_CTL */ |
3080 | reg = TRANS_DP_CTL(pipe); |
3049 | reg = TRANS_DP_CTL(pipe); |
3081 | temp = I915_READ(reg); |
3050 | temp = I915_READ(reg); |
3082 | temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); |
3051 | temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); |
3083 | temp |= TRANS_DP_PORT_SEL_NONE; |
3052 | temp |= TRANS_DP_PORT_SEL_NONE; |
3084 | I915_WRITE(reg, temp); |
3053 | I915_WRITE(reg, temp); |
3085 | 3054 | ||
3086 | /* disable DPLL_SEL */ |
3055 | /* disable DPLL_SEL */ |
3087 | temp = I915_READ(PCH_DPLL_SEL); |
3056 | temp = I915_READ(PCH_DPLL_SEL); |
3088 | switch (pipe) { |
3057 | switch (pipe) { |
3089 | case 0: |
3058 | case 0: |
3090 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); |
3059 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); |
3091 | break; |
3060 | break; |
3092 | case 1: |
3061 | case 1: |
3093 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
3062 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
3094 | break; |
3063 | break; |
3095 | case 2: |
3064 | case 2: |
3096 | /* C shares PLL A or B */ |
3065 | /* C shares PLL A or B */ |
3097 | temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); |
3066 | temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); |
3098 | break; |
3067 | break; |
3099 | default: |
3068 | default: |
3100 | BUG(); /* wtf */ |
3069 | BUG(); /* wtf */ |
3101 | } |
3070 | } |
3102 | I915_WRITE(PCH_DPLL_SEL, temp); |
3071 | I915_WRITE(PCH_DPLL_SEL, temp); |
3103 | } |
3072 | } |
3104 | 3073 | ||
3105 | /* disable PCH DPLL */ |
3074 | /* disable PCH DPLL */ |
3106 | if (!intel_crtc->no_pll) |
3075 | if (!intel_crtc->no_pll) |
3107 | intel_disable_pch_pll(dev_priv, pipe); |
3076 | intel_disable_pch_pll(dev_priv, pipe); |
3108 | 3077 | ||
3109 | /* Switch from PCDclk to Rawclk */ |
3078 | /* Switch from PCDclk to Rawclk */ |
3110 | reg = FDI_RX_CTL(pipe); |
3079 | reg = FDI_RX_CTL(pipe); |
3111 | temp = I915_READ(reg); |
3080 | temp = I915_READ(reg); |
3112 | I915_WRITE(reg, temp & ~FDI_PCDCLK); |
3081 | I915_WRITE(reg, temp & ~FDI_PCDCLK); |
3113 | 3082 | ||
3114 | /* Disable CPU FDI TX PLL */ |
3083 | /* Disable CPU FDI TX PLL */ |
3115 | reg = FDI_TX_CTL(pipe); |
3084 | reg = FDI_TX_CTL(pipe); |
3116 | temp = I915_READ(reg); |
3085 | temp = I915_READ(reg); |
3117 | I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); |
3086 | I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); |
3118 | 3087 | ||
3119 | POSTING_READ(reg); |
3088 | POSTING_READ(reg); |
3120 | udelay(100); |
3089 | udelay(100); |
3121 | 3090 | ||
3122 | reg = FDI_RX_CTL(pipe); |
3091 | reg = FDI_RX_CTL(pipe); |
3123 | temp = I915_READ(reg); |
3092 | temp = I915_READ(reg); |
3124 | I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); |
3093 | I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); |
3125 | 3094 | ||
3126 | /* Wait for the clocks to turn off. */ |
3095 | /* Wait for the clocks to turn off. */ |
3127 | POSTING_READ(reg); |
3096 | POSTING_READ(reg); |
3128 | udelay(100); |
3097 | udelay(100); |
3129 | 3098 | ||
3130 | intel_crtc->active = false; |
3099 | intel_crtc->active = false; |
3131 | intel_update_watermarks(dev); |
3100 | intel_update_watermarks(dev); |
3132 | 3101 | ||
3133 | mutex_lock(&dev->struct_mutex); |
3102 | mutex_lock(&dev->struct_mutex); |
3134 | intel_update_fbc(dev); |
3103 | intel_update_fbc(dev); |
3135 | intel_clear_scanline_wait(dev); |
3104 | intel_clear_scanline_wait(dev); |
3136 | mutex_unlock(&dev->struct_mutex); |
3105 | mutex_unlock(&dev->struct_mutex); |
3137 | 3106 | ||
3138 | LEAVE(); |
3107 | LEAVE(); |
3139 | 3108 | ||
3140 | } |
3109 | } |
3141 | 3110 | ||
3142 | static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) |
3111 | static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) |
3143 | { |
3112 | { |
3144 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3113 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3145 | int pipe = intel_crtc->pipe; |
3114 | int pipe = intel_crtc->pipe; |
3146 | int plane = intel_crtc->plane; |
3115 | int plane = intel_crtc->plane; |
3147 | 3116 | ||
3148 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
3117 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
3149 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
3118 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
3150 | */ |
3119 | */ |
3151 | switch (mode) { |
3120 | switch (mode) { |
3152 | case DRM_MODE_DPMS_ON: |
3121 | case DRM_MODE_DPMS_ON: |
3153 | case DRM_MODE_DPMS_STANDBY: |
3122 | case DRM_MODE_DPMS_STANDBY: |
3154 | case DRM_MODE_DPMS_SUSPEND: |
3123 | case DRM_MODE_DPMS_SUSPEND: |
3155 | DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); |
3124 | DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); |
3156 | ironlake_crtc_enable(crtc); |
3125 | ironlake_crtc_enable(crtc); |
3157 | break; |
3126 | break; |
3158 | 3127 | ||
3159 | case DRM_MODE_DPMS_OFF: |
3128 | case DRM_MODE_DPMS_OFF: |
3160 | DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); |
3129 | DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); |
3161 | ironlake_crtc_disable(crtc); |
3130 | ironlake_crtc_disable(crtc); |
3162 | break; |
3131 | break; |
3163 | } |
3132 | } |
3164 | } |
3133 | } |
3165 | 3134 | ||
3166 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) |
3135 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) |
3167 | { |
3136 | { |
3168 | if (!enable && intel_crtc->overlay) { |
3137 | if (!enable && intel_crtc->overlay) { |
3169 | struct drm_device *dev = intel_crtc->base.dev; |
3138 | struct drm_device *dev = intel_crtc->base.dev; |
3170 | struct drm_i915_private *dev_priv = dev->dev_private; |
3139 | struct drm_i915_private *dev_priv = dev->dev_private; |
3171 | 3140 | ||
3172 | mutex_lock(&dev->struct_mutex); |
3141 | mutex_lock(&dev->struct_mutex); |
3173 | dev_priv->mm.interruptible = false; |
3142 | dev_priv->mm.interruptible = false; |
3174 | // (void) intel_overlay_switch_off(intel_crtc->overlay); |
3143 | // (void) intel_overlay_switch_off(intel_crtc->overlay); |
3175 | dev_priv->mm.interruptible = true; |
3144 | dev_priv->mm.interruptible = true; |
3176 | mutex_unlock(&dev->struct_mutex); |
3145 | mutex_unlock(&dev->struct_mutex); |
3177 | } |
3146 | } |
3178 | 3147 | ||
3179 | /* Let userspace switch the overlay on again. In most cases userspace |
3148 | /* Let userspace switch the overlay on again. In most cases userspace |
3180 | * has to recompute where to put it anyway. |
3149 | * has to recompute where to put it anyway. |
3181 | */ |
3150 | */ |
3182 | } |
3151 | } |
3183 | 3152 | ||
3184 | static void i9xx_crtc_enable(struct drm_crtc *crtc) |
3153 | static void i9xx_crtc_enable(struct drm_crtc *crtc) |
3185 | { |
3154 | { |
3186 | struct drm_device *dev = crtc->dev; |
3155 | struct drm_device *dev = crtc->dev; |
3187 | struct drm_i915_private *dev_priv = dev->dev_private; |
3156 | struct drm_i915_private *dev_priv = dev->dev_private; |
3188 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3157 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3189 | int pipe = intel_crtc->pipe; |
3158 | int pipe = intel_crtc->pipe; |
3190 | int plane = intel_crtc->plane; |
3159 | int plane = intel_crtc->plane; |
3191 | 3160 | ||
3192 | if (intel_crtc->active) |
3161 | if (intel_crtc->active) |
3193 | return; |
3162 | return; |
3194 | 3163 | ||
3195 | intel_crtc->active = true; |
3164 | intel_crtc->active = true; |
3196 | intel_update_watermarks(dev); |
3165 | intel_update_watermarks(dev); |
3197 | 3166 | ||
3198 | intel_enable_pll(dev_priv, pipe); |
3167 | intel_enable_pll(dev_priv, pipe); |
3199 | intel_enable_pipe(dev_priv, pipe, false); |
3168 | intel_enable_pipe(dev_priv, pipe, false); |
3200 | intel_enable_plane(dev_priv, plane, pipe); |
3169 | intel_enable_plane(dev_priv, plane, pipe); |
3201 | 3170 | ||
3202 | intel_crtc_load_lut(crtc); |
3171 | intel_crtc_load_lut(crtc); |
3203 | intel_update_fbc(dev); |
3172 | intel_update_fbc(dev); |
3204 | 3173 | ||
3205 | /* Give the overlay scaler a chance to enable if it's on this pipe */ |
3174 | /* Give the overlay scaler a chance to enable if it's on this pipe */ |
3206 | intel_crtc_dpms_overlay(intel_crtc, true); |
3175 | intel_crtc_dpms_overlay(intel_crtc, true); |
3207 | // intel_crtc_update_cursor(crtc, true); |
3176 | // intel_crtc_update_cursor(crtc, true); |
3208 | } |
3177 | } |
3209 | 3178 | ||
3210 | static void i9xx_crtc_disable(struct drm_crtc *crtc) |
3179 | static void i9xx_crtc_disable(struct drm_crtc *crtc) |
3211 | { |
3180 | { |
3212 | struct drm_device *dev = crtc->dev; |
3181 | struct drm_device *dev = crtc->dev; |
3213 | struct drm_i915_private *dev_priv = dev->dev_private; |
3182 | struct drm_i915_private *dev_priv = dev->dev_private; |
3214 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3183 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3215 | int pipe = intel_crtc->pipe; |
3184 | int pipe = intel_crtc->pipe; |
3216 | int plane = intel_crtc->plane; |
3185 | int plane = intel_crtc->plane; |
3217 | 3186 | ||
3218 | if (!intel_crtc->active) |
3187 | if (!intel_crtc->active) |
3219 | return; |
3188 | return; |
3220 | 3189 | ||
3221 | /* Give the overlay scaler a chance to disable if it's on this pipe */ |
3190 | /* Give the overlay scaler a chance to disable if it's on this pipe */ |
3222 | intel_crtc_wait_for_pending_flips(crtc); |
3191 | intel_crtc_wait_for_pending_flips(crtc); |
3223 | // drm_vblank_off(dev, pipe); |
3192 | // drm_vblank_off(dev, pipe); |
3224 | intel_crtc_dpms_overlay(intel_crtc, false); |
3193 | intel_crtc_dpms_overlay(intel_crtc, false); |
3225 | // intel_crtc_update_cursor(crtc, false); |
3194 | // intel_crtc_update_cursor(crtc, false); |
3226 | 3195 | ||
3227 | if (dev_priv->cfb_plane == plane) |
3196 | if (dev_priv->cfb_plane == plane) |
3228 | intel_disable_fbc(dev); |
3197 | intel_disable_fbc(dev); |
3229 | 3198 | ||
3230 | intel_disable_plane(dev_priv, plane, pipe); |
3199 | intel_disable_plane(dev_priv, plane, pipe); |
3231 | intel_disable_pipe(dev_priv, pipe); |
3200 | intel_disable_pipe(dev_priv, pipe); |
3232 | intel_disable_pll(dev_priv, pipe); |
3201 | intel_disable_pll(dev_priv, pipe); |
3233 | 3202 | ||
3234 | intel_crtc->active = false; |
3203 | intel_crtc->active = false; |
3235 | intel_update_fbc(dev); |
3204 | intel_update_fbc(dev); |
3236 | intel_update_watermarks(dev); |
3205 | intel_update_watermarks(dev); |
3237 | intel_clear_scanline_wait(dev); |
3206 | intel_clear_scanline_wait(dev); |
3238 | } |
3207 | } |
3239 | 3208 | ||
3240 | static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) |
3209 | static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) |
3241 | { |
3210 | { |
3242 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
3211 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
3243 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
3212 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
3244 | */ |
3213 | */ |
3245 | switch (mode) { |
3214 | switch (mode) { |
3246 | case DRM_MODE_DPMS_ON: |
3215 | case DRM_MODE_DPMS_ON: |
3247 | case DRM_MODE_DPMS_STANDBY: |
3216 | case DRM_MODE_DPMS_STANDBY: |
3248 | case DRM_MODE_DPMS_SUSPEND: |
3217 | case DRM_MODE_DPMS_SUSPEND: |
3249 | i9xx_crtc_enable(crtc); |
3218 | i9xx_crtc_enable(crtc); |
3250 | break; |
3219 | break; |
3251 | case DRM_MODE_DPMS_OFF: |
3220 | case DRM_MODE_DPMS_OFF: |
3252 | i9xx_crtc_disable(crtc); |
3221 | i9xx_crtc_disable(crtc); |
3253 | break; |
3222 | break; |
3254 | } |
3223 | } |
3255 | } |
3224 | } |
3256 | 3225 | ||
3257 | /** |
3226 | /** |
3258 | * Sets the power management mode of the pipe and plane. |
3227 | * Sets the power management mode of the pipe and plane. |
3259 | */ |
3228 | */ |
3260 | static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) |
3229 | static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) |
3261 | { |
3230 | { |
3262 | struct drm_device *dev = crtc->dev; |
3231 | struct drm_device *dev = crtc->dev; |
3263 | struct drm_i915_private *dev_priv = dev->dev_private; |
3232 | struct drm_i915_private *dev_priv = dev->dev_private; |
3264 | struct drm_i915_master_private *master_priv; |
3233 | struct drm_i915_master_private *master_priv; |
3265 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3234 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3266 | int pipe = intel_crtc->pipe; |
3235 | int pipe = intel_crtc->pipe; |
3267 | bool enabled; |
3236 | bool enabled; |
3268 | 3237 | ||
3269 | if (intel_crtc->dpms_mode == mode) |
3238 | if (intel_crtc->dpms_mode == mode) |
3270 | return; |
3239 | return; |
3271 | 3240 | ||
3272 | intel_crtc->dpms_mode = mode; |
3241 | intel_crtc->dpms_mode = mode; |
3273 | 3242 | ||
3274 | dev_priv->display.dpms(crtc, mode); |
3243 | dev_priv->display.dpms(crtc, mode); |
3275 | 3244 | ||
3276 | #if 0 |
3245 | #if 0 |
3277 | if (!dev->primary->master) |
3246 | if (!dev->primary->master) |
3278 | return; |
3247 | return; |
3279 | 3248 | ||
3280 | master_priv = dev->primary->master->driver_priv; |
3249 | master_priv = dev->primary->master->driver_priv; |
3281 | if (!master_priv->sarea_priv) |
3250 | if (!master_priv->sarea_priv) |
3282 | return; |
3251 | return; |
3283 | 3252 | ||
3284 | enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; |
3253 | enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; |
3285 | 3254 | ||
3286 | switch (pipe) { |
3255 | switch (pipe) { |
3287 | case 0: |
3256 | case 0: |
3288 | master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; |
3257 | master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; |
3289 | master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; |
3258 | master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; |
3290 | break; |
3259 | break; |
3291 | case 1: |
3260 | case 1: |
3292 | master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; |
3261 | master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; |
3293 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; |
3262 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; |
3294 | break; |
3263 | break; |
3295 | default: |
3264 | default: |
3296 | DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); |
3265 | DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); |
3297 | break; |
3266 | break; |
3298 | } |
3267 | } |
3299 | #endif |
3268 | #endif |
3300 | 3269 | ||
3301 | } |
3270 | } |
3302 | 3271 | ||
3303 | static void intel_crtc_disable(struct drm_crtc *crtc) |
3272 | static void intel_crtc_disable(struct drm_crtc *crtc) |
3304 | { |
3273 | { |
3305 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
3274 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
3306 | struct drm_device *dev = crtc->dev; |
3275 | struct drm_device *dev = crtc->dev; |
3307 | 3276 | ||
3308 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); |
3277 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); |
3309 | 3278 | ||
3310 | if (crtc->fb) { |
3279 | if (crtc->fb) { |
3311 | mutex_lock(&dev->struct_mutex); |
3280 | mutex_lock(&dev->struct_mutex); |
3312 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
3281 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
3313 | mutex_unlock(&dev->struct_mutex); |
3282 | mutex_unlock(&dev->struct_mutex); |
3314 | } |
3283 | } |
3315 | } |
3284 | } |
3316 | 3285 | ||
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will change)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	/* gen2/3/4 modeset prepare: simply shut the whole pipe down first. */
	i9xx_crtc_disable(crtc);
}
3329 | 3298 | ||
/* gen2/3/4 modeset commit: bring the freshly programmed pipe back up. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}
3334 | 3303 | ||
/* Ironlake modeset prepare: shut the whole pipe down before reprogramming. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}
3339 | 3308 | ||
/* Ironlake modeset commit: bring the freshly programmed pipe back up. */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}
3344 | 3313 | ||
/* Generic encoder "prepare" helper hook: turn the encoder off ahead of a
 * mode set by routing through its dpms callback. */
void intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
3351 | 3320 | ||
3352 | void intel_encoder_commit(struct drm_encoder *encoder) |
3321 | void intel_encoder_commit(struct drm_encoder *encoder) |
3353 | { |
3322 | { |
3354 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
3323 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
3355 | struct drm_device *dev = encoder->dev; |
3324 | struct drm_device *dev = encoder->dev; |
3356 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); |
3325 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); |
3357 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc); |
3326 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc); |
3358 | 3327 | ||
3359 | /* lvds has its own version of commit see intel_lvds_commit */ |
3328 | /* lvds has its own version of commit see intel_lvds_commit */ |
3360 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); |
3329 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); |
3361 | 3330 | ||
3362 | if (HAS_PCH_CPT(dev)) |
3331 | if (HAS_PCH_CPT(dev)) |
3363 | intel_cpt_verify_modeset(dev, intel_crtc->pipe); |
3332 | intel_cpt_verify_modeset(dev, intel_crtc->pipe); |
3364 | } |
3333 | } |
3365 | 3334 | ||
/* Common encoder destructor: tear down the DRM core state, then free the
 * containing intel_encoder allocation. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
3373 | 3342 | ||
3374 | static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, |
3343 | static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, |
3375 | struct drm_display_mode *mode, |
3344 | struct drm_display_mode *mode, |
3376 | struct drm_display_mode *adjusted_mode) |
3345 | struct drm_display_mode *adjusted_mode) |
3377 | { |
3346 | { |
3378 | struct drm_device *dev = crtc->dev; |
3347 | struct drm_device *dev = crtc->dev; |
3379 | 3348 | ||
3380 | if (HAS_PCH_SPLIT(dev)) { |
3349 | if (HAS_PCH_SPLIT(dev)) { |
3381 | /* FDI link clock is fixed at 2.7G */ |
3350 | /* FDI link clock is fixed at 2.7G */ |
3382 | if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) |
3351 | if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) |
3383 | return false; |
3352 | return false; |
3384 | } |
3353 | } |
3385 | 3354 | ||
3386 | /* XXX some encoders set the crtcinfo, others don't. |
3355 | /* XXX some encoders set the crtcinfo, others don't. |
3387 | * Obviously we need some form of conflict resolution here... |
3356 | * Obviously we need some form of conflict resolution here... |
3388 | */ |
3357 | */ |
3389 | if (adjusted_mode->crtc_htotal == 0) |
3358 | if (adjusted_mode->crtc_htotal == 0) |
3390 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
3359 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
3391 | 3360 | ||
3392 | return true; |
3361 | return true; |
3393 | } |
3362 | } |
3394 | 3363 | ||
/* i945: core display clock is a fixed 400 MHz (returned in kHz). */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
3399 | 3368 | ||
/* i915: core display clock is a fixed 333 MHz (returned in kHz). */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}
3404 | 3373 | ||
/* Remaining gen3/4 parts: core display clock is a fixed 200 MHz (in kHz). */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
3409 | 3378 | ||
3410 | static int i915gm_get_display_clock_speed(struct drm_device *dev) |
3379 | static int i915gm_get_display_clock_speed(struct drm_device *dev) |
3411 | { |
3380 | { |
3412 | u16 gcfgc = 0; |
3381 | u16 gcfgc = 0; |
3413 | 3382 | ||
3414 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); |
3383 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); |
3415 | 3384 | ||
3416 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) |
3385 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) |
3417 | return 133000; |
3386 | return 133000; |
3418 | else { |
3387 | else { |
3419 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { |
3388 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { |
3420 | case GC_DISPLAY_CLOCK_333_MHZ: |
3389 | case GC_DISPLAY_CLOCK_333_MHZ: |
3421 | return 333000; |
3390 | return 333000; |
3422 | default: |
3391 | default: |
3423 | case GC_DISPLAY_CLOCK_190_200_MHZ: |
3392 | case GC_DISPLAY_CLOCK_190_200_MHZ: |
3424 | return 190000; |
3393 | return 190000; |
3425 | } |
3394 | } |
3426 | } |
3395 | } |
3427 | } |
3396 | } |
3428 | 3397 | ||
/* i865: core display clock is a fixed 266 MHz (returned in kHz). */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}
3433 | 3402 | ||
/* i855: derive the display clock from the HPLL clock-control field. */
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	/* NOTE(review): hpllcc is never actually read from the hardware
	 * here, so the switch always evaluates 0 & mask.  Confirm whether
	 * this is the intentional "assume high speed" behavior described
	 * above or a missing PCI config read. */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
3453 | 3422 | ||
/* i830: core display clock is a fixed 133 MHz (returned in kHz). */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
3458 | 3427 | ||
/* FDI link M/N divider values; filled in by ironlake_compute_m_n(). */
struct fdi_m_n {
	u32 tu;		/* transfer unit size */
	u32 gmch_m;	/* data M: bits-per-pixel * pixel clock (reduced) */
	u32 gmch_n;	/* data N: link clock * lanes * 8 (reduced) */
	u32 link_m;	/* link M: pixel clock (reduced) */
	u32 link_n;	/* link N: link clock (reduced) */
};
3466 | 3435 | ||
/* Scale *num and *den down together until both fit in the 24-bit M/N
 * hardware fields.  The ratio is only approximately preserved, since
 * both are shifted by the same amount. */
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}
3475 | 3444 | ||
/* Compute the FDI M/N divider pairs for the given pixel format, lane
 * count, pixel clock and link clock; each ratio is reduced so both
 * halves fit the 24-bit hardware fields. */
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* Data M/N: payload bits per pixel clock vs. raw link bandwidth. */
	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	/* Link M/N: pixel clock vs. link clock. */
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
3491 | 3460 | ||
3492 | 3461 | ||
/* Per-platform FIFO/watermark parameters consumed by intel_calculate_wm(). */
struct intel_watermark_params {
	unsigned long fifo_size;	/* total FIFO size, in cachelines */
	unsigned long max_wm;		/* highest watermark level allowed */
	unsigned long default_wm;	/* fallback when the computed level underflows */
	unsigned long guard_size;	/* extra entries held in reserve */
	unsigned long cacheline_size;	/* FIFO line size, in bytes */
};
3500 | 3469 | ||
3501 | /* Pineview has different values for various configs */ |
3470 | /* Pineview has different values for various configs */ |
3502 | static const struct intel_watermark_params pineview_display_wm = { |
3471 | static const struct intel_watermark_params pineview_display_wm = { |
3503 | PINEVIEW_DISPLAY_FIFO, |
3472 | PINEVIEW_DISPLAY_FIFO, |
3504 | PINEVIEW_MAX_WM, |
3473 | PINEVIEW_MAX_WM, |
3505 | PINEVIEW_DFT_WM, |
3474 | PINEVIEW_DFT_WM, |
3506 | PINEVIEW_GUARD_WM, |
3475 | PINEVIEW_GUARD_WM, |
3507 | PINEVIEW_FIFO_LINE_SIZE |
3476 | PINEVIEW_FIFO_LINE_SIZE |
3508 | }; |
3477 | }; |
3509 | static const struct intel_watermark_params pineview_display_hplloff_wm = { |
3478 | static const struct intel_watermark_params pineview_display_hplloff_wm = { |
3510 | PINEVIEW_DISPLAY_FIFO, |
3479 | PINEVIEW_DISPLAY_FIFO, |
3511 | PINEVIEW_MAX_WM, |
3480 | PINEVIEW_MAX_WM, |
3512 | PINEVIEW_DFT_HPLLOFF_WM, |
3481 | PINEVIEW_DFT_HPLLOFF_WM, |
3513 | PINEVIEW_GUARD_WM, |
3482 | PINEVIEW_GUARD_WM, |
3514 | PINEVIEW_FIFO_LINE_SIZE |
3483 | PINEVIEW_FIFO_LINE_SIZE |
3515 | }; |
3484 | }; |
3516 | static const struct intel_watermark_params pineview_cursor_wm = { |
3485 | static const struct intel_watermark_params pineview_cursor_wm = { |
3517 | PINEVIEW_CURSOR_FIFO, |
3486 | PINEVIEW_CURSOR_FIFO, |
3518 | PINEVIEW_CURSOR_MAX_WM, |
3487 | PINEVIEW_CURSOR_MAX_WM, |
3519 | PINEVIEW_CURSOR_DFT_WM, |
3488 | PINEVIEW_CURSOR_DFT_WM, |
3520 | PINEVIEW_CURSOR_GUARD_WM, |
3489 | PINEVIEW_CURSOR_GUARD_WM, |
3521 | PINEVIEW_FIFO_LINE_SIZE, |
3490 | PINEVIEW_FIFO_LINE_SIZE, |
3522 | }; |
3491 | }; |
3523 | static const struct intel_watermark_params pineview_cursor_hplloff_wm = { |
3492 | static const struct intel_watermark_params pineview_cursor_hplloff_wm = { |
3524 | PINEVIEW_CURSOR_FIFO, |
3493 | PINEVIEW_CURSOR_FIFO, |
3525 | PINEVIEW_CURSOR_MAX_WM, |
3494 | PINEVIEW_CURSOR_MAX_WM, |
3526 | PINEVIEW_CURSOR_DFT_WM, |
3495 | PINEVIEW_CURSOR_DFT_WM, |
3527 | PINEVIEW_CURSOR_GUARD_WM, |
3496 | PINEVIEW_CURSOR_GUARD_WM, |
3528 | PINEVIEW_FIFO_LINE_SIZE |
3497 | PINEVIEW_FIFO_LINE_SIZE |
3529 | }; |
3498 | }; |
3530 | static const struct intel_watermark_params g4x_wm_info = { |
3499 | static const struct intel_watermark_params g4x_wm_info = { |
3531 | G4X_FIFO_SIZE, |
3500 | G4X_FIFO_SIZE, |
3532 | G4X_MAX_WM, |
3501 | G4X_MAX_WM, |
3533 | G4X_MAX_WM, |
3502 | G4X_MAX_WM, |
3534 | 2, |
3503 | 2, |
3535 | G4X_FIFO_LINE_SIZE, |
3504 | G4X_FIFO_LINE_SIZE, |
3536 | }; |
3505 | }; |
3537 | static const struct intel_watermark_params g4x_cursor_wm_info = { |
3506 | static const struct intel_watermark_params g4x_cursor_wm_info = { |
3538 | I965_CURSOR_FIFO, |
3507 | I965_CURSOR_FIFO, |
3539 | I965_CURSOR_MAX_WM, |
3508 | I965_CURSOR_MAX_WM, |
3540 | I965_CURSOR_DFT_WM, |
3509 | I965_CURSOR_DFT_WM, |
3541 | 2, |
3510 | 2, |
3542 | G4X_FIFO_LINE_SIZE, |
3511 | G4X_FIFO_LINE_SIZE, |
3543 | }; |
3512 | }; |
3544 | static const struct intel_watermark_params i965_cursor_wm_info = { |
3513 | static const struct intel_watermark_params i965_cursor_wm_info = { |
3545 | I965_CURSOR_FIFO, |
3514 | I965_CURSOR_FIFO, |
3546 | I965_CURSOR_MAX_WM, |
3515 | I965_CURSOR_MAX_WM, |
3547 | I965_CURSOR_DFT_WM, |
3516 | I965_CURSOR_DFT_WM, |
3548 | 2, |
3517 | 2, |
3549 | I915_FIFO_LINE_SIZE, |
3518 | I915_FIFO_LINE_SIZE, |
3550 | }; |
3519 | }; |
3551 | static const struct intel_watermark_params i945_wm_info = { |
3520 | static const struct intel_watermark_params i945_wm_info = { |
3552 | I945_FIFO_SIZE, |
3521 | I945_FIFO_SIZE, |
3553 | I915_MAX_WM, |
3522 | I915_MAX_WM, |
3554 | 1, |
3523 | 1, |
3555 | 2, |
3524 | 2, |
3556 | I915_FIFO_LINE_SIZE |
3525 | I915_FIFO_LINE_SIZE |
3557 | }; |
3526 | }; |
3558 | static const struct intel_watermark_params i915_wm_info = { |
3527 | static const struct intel_watermark_params i915_wm_info = { |
3559 | I915_FIFO_SIZE, |
3528 | I915_FIFO_SIZE, |
3560 | I915_MAX_WM, |
3529 | I915_MAX_WM, |
3561 | 1, |
3530 | 1, |
3562 | 2, |
3531 | 2, |
3563 | I915_FIFO_LINE_SIZE |
3532 | I915_FIFO_LINE_SIZE |
3564 | }; |
3533 | }; |
3565 | static const struct intel_watermark_params i855_wm_info = { |
3534 | static const struct intel_watermark_params i855_wm_info = { |
3566 | I855GM_FIFO_SIZE, |
3535 | I855GM_FIFO_SIZE, |
3567 | I915_MAX_WM, |
3536 | I915_MAX_WM, |
3568 | 1, |
3537 | 1, |
3569 | 2, |
3538 | 2, |
3570 | I830_FIFO_LINE_SIZE |
3539 | I830_FIFO_LINE_SIZE |
3571 | }; |
3540 | }; |
3572 | static const struct intel_watermark_params i830_wm_info = { |
3541 | static const struct intel_watermark_params i830_wm_info = { |
3573 | I830_FIFO_SIZE, |
3542 | I830_FIFO_SIZE, |
3574 | I915_MAX_WM, |
3543 | I915_MAX_WM, |
3575 | 1, |
3544 | 1, |
3576 | 2, |
3545 | 2, |
3577 | I830_FIFO_LINE_SIZE |
3546 | I830_FIFO_LINE_SIZE |
3578 | }; |
3547 | }; |
3579 | 3548 | ||
3580 | static const struct intel_watermark_params ironlake_display_wm_info = { |
3549 | static const struct intel_watermark_params ironlake_display_wm_info = { |
3581 | ILK_DISPLAY_FIFO, |
3550 | ILK_DISPLAY_FIFO, |
3582 | ILK_DISPLAY_MAXWM, |
3551 | ILK_DISPLAY_MAXWM, |
3583 | ILK_DISPLAY_DFTWM, |
3552 | ILK_DISPLAY_DFTWM, |
3584 | 2, |
3553 | 2, |
3585 | ILK_FIFO_LINE_SIZE |
3554 | ILK_FIFO_LINE_SIZE |
3586 | }; |
3555 | }; |
3587 | static const struct intel_watermark_params ironlake_cursor_wm_info = { |
3556 | static const struct intel_watermark_params ironlake_cursor_wm_info = { |
3588 | ILK_CURSOR_FIFO, |
3557 | ILK_CURSOR_FIFO, |
3589 | ILK_CURSOR_MAXWM, |
3558 | ILK_CURSOR_MAXWM, |
3590 | ILK_CURSOR_DFTWM, |
3559 | ILK_CURSOR_DFTWM, |
3591 | 2, |
3560 | 2, |
3592 | ILK_FIFO_LINE_SIZE |
3561 | ILK_FIFO_LINE_SIZE |
3593 | }; |
3562 | }; |
3594 | static const struct intel_watermark_params ironlake_display_srwm_info = { |
3563 | static const struct intel_watermark_params ironlake_display_srwm_info = { |
3595 | ILK_DISPLAY_SR_FIFO, |
3564 | ILK_DISPLAY_SR_FIFO, |
3596 | ILK_DISPLAY_MAX_SRWM, |
3565 | ILK_DISPLAY_MAX_SRWM, |
3597 | ILK_DISPLAY_DFT_SRWM, |
3566 | ILK_DISPLAY_DFT_SRWM, |
3598 | 2, |
3567 | 2, |
3599 | ILK_FIFO_LINE_SIZE |
3568 | ILK_FIFO_LINE_SIZE |
3600 | }; |
3569 | }; |
3601 | static const struct intel_watermark_params ironlake_cursor_srwm_info = { |
3570 | static const struct intel_watermark_params ironlake_cursor_srwm_info = { |
3602 | ILK_CURSOR_SR_FIFO, |
3571 | ILK_CURSOR_SR_FIFO, |
3603 | ILK_CURSOR_MAX_SRWM, |
3572 | ILK_CURSOR_MAX_SRWM, |
3604 | ILK_CURSOR_DFT_SRWM, |
3573 | ILK_CURSOR_DFT_SRWM, |
3605 | 2, |
3574 | 2, |
3606 | ILK_FIFO_LINE_SIZE |
3575 | ILK_FIFO_LINE_SIZE |
3607 | }; |
3576 | }; |
3608 | 3577 | ||
3609 | static const struct intel_watermark_params sandybridge_display_wm_info = { |
3578 | static const struct intel_watermark_params sandybridge_display_wm_info = { |
3610 | SNB_DISPLAY_FIFO, |
3579 | SNB_DISPLAY_FIFO, |
3611 | SNB_DISPLAY_MAXWM, |
3580 | SNB_DISPLAY_MAXWM, |
3612 | SNB_DISPLAY_DFTWM, |
3581 | SNB_DISPLAY_DFTWM, |
3613 | 2, |
3582 | 2, |
3614 | SNB_FIFO_LINE_SIZE |
3583 | SNB_FIFO_LINE_SIZE |
3615 | }; |
3584 | }; |
3616 | static const struct intel_watermark_params sandybridge_cursor_wm_info = { |
3585 | static const struct intel_watermark_params sandybridge_cursor_wm_info = { |
3617 | SNB_CURSOR_FIFO, |
3586 | SNB_CURSOR_FIFO, |
3618 | SNB_CURSOR_MAXWM, |
3587 | SNB_CURSOR_MAXWM, |
3619 | SNB_CURSOR_DFTWM, |
3588 | SNB_CURSOR_DFTWM, |
3620 | 2, |
3589 | 2, |
3621 | SNB_FIFO_LINE_SIZE |
3590 | SNB_FIFO_LINE_SIZE |
3622 | }; |
3591 | }; |
3623 | static const struct intel_watermark_params sandybridge_display_srwm_info = { |
3592 | static const struct intel_watermark_params sandybridge_display_srwm_info = { |
3624 | SNB_DISPLAY_SR_FIFO, |
3593 | SNB_DISPLAY_SR_FIFO, |
3625 | SNB_DISPLAY_MAX_SRWM, |
3594 | SNB_DISPLAY_MAX_SRWM, |
3626 | SNB_DISPLAY_DFT_SRWM, |
3595 | SNB_DISPLAY_DFT_SRWM, |
3627 | 2, |
3596 | 2, |
3628 | SNB_FIFO_LINE_SIZE |
3597 | SNB_FIFO_LINE_SIZE |
3629 | }; |
3598 | }; |
3630 | static const struct intel_watermark_params sandybridge_cursor_srwm_info = { |
3599 | static const struct intel_watermark_params sandybridge_cursor_srwm_info = { |
3631 | SNB_CURSOR_SR_FIFO, |
3600 | SNB_CURSOR_SR_FIFO, |
3632 | SNB_CURSOR_MAX_SRWM, |
3601 | SNB_CURSOR_MAX_SRWM, |
3633 | SNB_CURSOR_DFT_SRWM, |
3602 | SNB_CURSOR_DFT_SRWM, |
3634 | 2, |
3603 | 2, |
3635 | SNB_FIFO_LINE_SIZE |
3604 | SNB_FIFO_LINE_SIZE |
3636 | }; |
3605 | }; |
3637 | 3606 | ||
3638 | 3607 | ||
3639 | /** |
3608 | /** |
3640 | * intel_calculate_wm - calculate watermark level |
3609 | * intel_calculate_wm - calculate watermark level |
3641 | * @clock_in_khz: pixel clock |
3610 | * @clock_in_khz: pixel clock |
3642 | * @wm: chip FIFO params |
3611 | * @wm: chip FIFO params |
3643 | * @pixel_size: display pixel size |
3612 | * @pixel_size: display pixel size |
3644 | * @latency_ns: memory latency for the platform |
3613 | * @latency_ns: memory latency for the platform |
3645 | * |
3614 | * |
3646 | * Calculate the watermark level (the level at which the display plane will |
3615 | * Calculate the watermark level (the level at which the display plane will |
3647 | * start fetching from memory again). Each chip has a different display |
3616 | * start fetching from memory again). Each chip has a different display |
3648 | * FIFO size and allocation, so the caller needs to figure that out and pass |
3617 | * FIFO size and allocation, so the caller needs to figure that out and pass |
3649 | * in the correct intel_watermark_params structure. |
3618 | * in the correct intel_watermark_params structure. |
3650 | * |
3619 | * |
3651 | * As the pixel clock runs, the FIFO will be drained at a rate that depends |
3620 | * As the pixel clock runs, the FIFO will be drained at a rate that depends |
3652 | * on the pixel size. When it reaches the watermark level, it'll start |
3621 | * on the pixel size. When it reaches the watermark level, it'll start |
3653 | * fetching FIFO line sized based chunks from memory until the FIFO fills |
3622 | * fetching FIFO line sized based chunks from memory until the FIFO fills |
3654 | * past the watermark point. If the FIFO drains completely, a FIFO underrun |
3623 | * past the watermark point. If the FIFO drains completely, a FIFO underrun |
3655 | * will occur, and a display engine hang could result. |
3624 | * will occur, and a display engine hang could result. |
3656 | */ |
3625 | */ |
3657 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, |
3626 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, |
3658 | const struct intel_watermark_params *wm, |
3627 | const struct intel_watermark_params *wm, |
3659 | int fifo_size, |
3628 | int fifo_size, |
3660 | int pixel_size, |
3629 | int pixel_size, |
3661 | unsigned long latency_ns) |
3630 | unsigned long latency_ns) |
3662 | { |
3631 | { |
3663 | long entries_required, wm_size; |
3632 | long entries_required, wm_size; |
3664 | 3633 | ||
3665 | /* |
3634 | /* |
3666 | * Note: we need to make sure we don't overflow for various clock & |
3635 | * Note: we need to make sure we don't overflow for various clock & |
3667 | * latency values. |
3636 | * latency values. |
3668 | * clocks go from a few thousand to several hundred thousand. |
3637 | * clocks go from a few thousand to several hundred thousand. |
3669 | * latency is usually a few thousand |
3638 | * latency is usually a few thousand |
3670 | */ |
3639 | */ |
3671 | entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / |
3640 | entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / |
3672 | 1000; |
3641 | 1000; |
3673 | entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); |
3642 | entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); |
3674 | 3643 | ||
3675 | DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); |
3644 | DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); |
3676 | 3645 | ||
3677 | wm_size = fifo_size - (entries_required + wm->guard_size); |
3646 | wm_size = fifo_size - (entries_required + wm->guard_size); |
3678 | 3647 | ||
3679 | DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); |
3648 | DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); |
3680 | 3649 | ||
3681 | /* Don't promote wm_size to unsigned... */ |
3650 | /* Don't promote wm_size to unsigned... */ |
3682 | if (wm_size > (long)wm->max_wm) |
3651 | if (wm_size > (long)wm->max_wm) |
3683 | wm_size = wm->max_wm; |
3652 | wm_size = wm->max_wm; |
3684 | if (wm_size <= 0) |
3653 | if (wm_size <= 0) |
3685 | wm_size = wm->default_wm; |
3654 | wm_size = wm->default_wm; |
3686 | return wm_size; |
3655 | return wm_size; |
3687 | } |
3656 | } |
3688 | 3657 | ||
/* Self-refresh (CxSR) latency values, matched against the running memory
 * configuration by intel_get_cxsr_latency(). */
struct cxsr_latency {
	int is_desktop;			/* 1 = desktop part, 0 = mobile */
	int is_ddr3;			/* 1 = DDR3, 0 = DDR2 */
	unsigned long fsb_freq;		/* FSB frequency (presumably MHz — see table) */
	unsigned long mem_freq;		/* memory frequency (presumably MHz) */
	unsigned long display_sr;	/* display self-refresh latency */
	unsigned long display_hpll_disable;	/* display latency with HPLL off */
	unsigned long cursor_sr;	/* cursor self-refresh latency */
	unsigned long cursor_hpll_disable;	/* cursor latency with HPLL off */
};
3699 | 3668 | ||
/* Columns: {is_desktop, is_ddr3, fsb_freq, mem_freq,
 *           display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable}
 * Grouped by desktop/mobile, then by FSB frequency. */
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	/* NOTE(review): 34106 breaks the "cursor_sr + 30000" pattern seen
	 * everywhere else (expected 34103) — verify against the original
	 * latency data before "fixing". */
	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
3737 | 3706 | ||
3738 | static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, |
3707 | static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, |
3739 | int is_ddr3, |
3708 | int is_ddr3, |
3740 | int fsb, |
3709 | int fsb, |
3741 | int mem) |
3710 | int mem) |
3742 | { |
3711 | { |
3743 | const struct cxsr_latency *latency; |
3712 | const struct cxsr_latency *latency; |
3744 | int i; |
3713 | int i; |
3745 | 3714 | ||
3746 | if (fsb == 0 || mem == 0) |
3715 | if (fsb == 0 || mem == 0) |
3747 | return NULL; |
3716 | return NULL; |
3748 | 3717 | ||
3749 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { |
3718 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { |
3750 | latency = &cxsr_latency_table[i]; |
3719 | latency = &cxsr_latency_table[i]; |
3751 | if (is_desktop == latency->is_desktop && |
3720 | if (is_desktop == latency->is_desktop && |
3752 | is_ddr3 == latency->is_ddr3 && |
3721 | is_ddr3 == latency->is_ddr3 && |
3753 | fsb == latency->fsb_freq && mem == latency->mem_freq) |
3722 | fsb == latency->fsb_freq && mem == latency->mem_freq) |
3754 | return latency; |
3723 | return latency; |
3755 | } |
3724 | } |
3756 | 3725 | ||
3757 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); |
3726 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); |
3758 | 3727 | ||
3759 | return NULL; |
3728 | return NULL; |
3760 | } |
3729 | } |
3761 | 3730 | ||
/* Turn off Pineview self-refresh (CxSR) by clearing its enable bit in
 * DSPFW3. */
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
3769 | 3738 | ||
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
3785 | 3754 | ||
3786 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) |
3755 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) |
3787 | { |
3756 | { |
3788 | struct drm_i915_private *dev_priv = dev->dev_private; |
3757 | struct drm_i915_private *dev_priv = dev->dev_private; |
3789 | uint32_t dsparb = I915_READ(DSPARB); |
3758 | uint32_t dsparb = I915_READ(DSPARB); |
3790 | int size; |
3759 | int size; |
3791 | 3760 | ||
3792 | size = dsparb & 0x7f; |
3761 | size = dsparb & 0x7f; |
3793 | if (plane) |
3762 | if (plane) |
3794 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; |
3763 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; |
3795 | 3764 | ||
3796 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
3765 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
3797 | plane ? "B" : "A", size); |
3766 | plane ? "B" : "A", size); |
3798 | 3767 | ||
3799 | return size; |
3768 | return size; |
3800 | } |
3769 | } |
3801 | 3770 | ||
3802 | static int i85x_get_fifo_size(struct drm_device *dev, int plane) |
3771 | static int i85x_get_fifo_size(struct drm_device *dev, int plane) |
3803 | { |
3772 | { |
3804 | struct drm_i915_private *dev_priv = dev->dev_private; |
3773 | struct drm_i915_private *dev_priv = dev->dev_private; |
3805 | uint32_t dsparb = I915_READ(DSPARB); |
3774 | uint32_t dsparb = I915_READ(DSPARB); |
3806 | int size; |
3775 | int size; |
3807 | 3776 | ||
3808 | size = dsparb & 0x1ff; |
3777 | size = dsparb & 0x1ff; |
3809 | if (plane) |
3778 | if (plane) |
3810 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; |
3779 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; |
3811 | size >>= 1; /* Convert to cachelines */ |
3780 | size >>= 1; /* Convert to cachelines */ |
3812 | 3781 | ||
3813 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
3782 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
3814 | plane ? "B" : "A", size); |
3783 | plane ? "B" : "A", size); |
3815 | 3784 | ||
3816 | return size; |
3785 | return size; |
3817 | } |
3786 | } |
3818 | 3787 | ||
3819 | static int i845_get_fifo_size(struct drm_device *dev, int plane) |
3788 | static int i845_get_fifo_size(struct drm_device *dev, int plane) |
3820 | { |
3789 | { |
3821 | struct drm_i915_private *dev_priv = dev->dev_private; |
3790 | struct drm_i915_private *dev_priv = dev->dev_private; |
3822 | uint32_t dsparb = I915_READ(DSPARB); |
3791 | uint32_t dsparb = I915_READ(DSPARB); |
3823 | int size; |
3792 | int size; |
3824 | 3793 | ||
3825 | size = dsparb & 0x7f; |
3794 | size = dsparb & 0x7f; |
3826 | size >>= 2; /* Convert to cachelines */ |
3795 | size >>= 2; /* Convert to cachelines */ |
3827 | 3796 | ||
3828 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
3797 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
3829 | plane ? "B" : "A", |
3798 | plane ? "B" : "A", |
3830 | size); |
3799 | size); |
3831 | 3800 | ||
3832 | return size; |
3801 | return size; |
3833 | } |
3802 | } |
3834 | 3803 | ||
3835 | static int i830_get_fifo_size(struct drm_device *dev, int plane) |
3804 | static int i830_get_fifo_size(struct drm_device *dev, int plane) |
3836 | { |
3805 | { |
3837 | struct drm_i915_private *dev_priv = dev->dev_private; |
3806 | struct drm_i915_private *dev_priv = dev->dev_private; |
3838 | uint32_t dsparb = I915_READ(DSPARB); |
3807 | uint32_t dsparb = I915_READ(DSPARB); |
3839 | int size; |
3808 | int size; |
3840 | 3809 | ||
3841 | size = dsparb & 0x7f; |
3810 | size = dsparb & 0x7f; |
3842 | size >>= 1; /* Convert to cachelines */ |
3811 | size >>= 1; /* Convert to cachelines */ |
3843 | 3812 | ||
3844 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
3813 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
3845 | plane ? "B" : "A", size); |
3814 | plane ? "B" : "A", size); |
3846 | 3815 | ||
3847 | return size; |
3816 | return size; |
3848 | } |
3817 | } |
3849 | 3818 | ||
3850 | static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) |
3819 | static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) |
3851 | { |
3820 | { |
3852 | struct drm_crtc *crtc, *enabled = NULL; |
3821 | struct drm_crtc *crtc, *enabled = NULL; |
3853 | 3822 | ||
3854 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
3823 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
3855 | if (crtc->enabled && crtc->fb) { |
3824 | if (crtc->enabled && crtc->fb) { |
3856 | if (enabled) |
3825 | if (enabled) |
3857 | return NULL; |
3826 | return NULL; |
3858 | enabled = crtc; |
3827 | enabled = crtc; |
3859 | } |
3828 | } |
3860 | } |
3829 | } |
3861 | 3830 | ||
3862 | return enabled; |
3831 | return enabled; |
3863 | } |
3832 | } |
3864 | 3833 | ||
/*
 * Program the Pineview self-refresh (CxSR) watermarks.
 *
 * CxSR is only usable with a single active pipe; if zero or multiple
 * CRTCs are enabled, or the memory configuration is unknown, self-
 * refresh is disabled instead.
 */
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		/* NOTE(review): cursor calc passes the *display* fifo_size,
		 * not pineview_cursor_wm.fifo_size — looks deliberate but
		 * worth confirming against the hardware docs. */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr — only after all watermarks are in place */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
3933 | 3902 | ||
/*
 * Compute the normal (non-self-refresh) watermarks for one plane and
 * its cursor on G4x.
 *
 * Returns true when the plane's CRTC is active and the computed values
 * are meaningful; when the CRTC is off, both outputs fall back to the
 * guard sizes and false is returned.
 */
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (crtc->fb == NULL || !crtc->enabled) {
		/* Inactive pipe: report the minimum (guard) watermarks. */
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	/* Extra FIFO slack to cover a TLB miss, when the FIFO is larger
	 * than 8 bytes per displayed pixel — TODO confirm units. */
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	/* lines of latency, rounded up */
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
3984 | 3953 | ||
3985 | /* |
3954 | /* |
3986 | * Check the wm result. |
3955 | * Check the wm result. |
3987 | * |
3956 | * |
3988 | * If any calculated watermark values is larger than the maximum value that |
3957 | * If any calculated watermark values is larger than the maximum value that |
3989 | * can be programmed into the associated watermark register, that watermark |
3958 | * can be programmed into the associated watermark register, that watermark |
3990 | * must be disabled. |
3959 | * must be disabled. |
3991 | */ |
3960 | */ |
3992 | static bool g4x_check_srwm(struct drm_device *dev, |
3961 | static bool g4x_check_srwm(struct drm_device *dev, |
3993 | int display_wm, int cursor_wm, |
3962 | int display_wm, int cursor_wm, |
3994 | const struct intel_watermark_params *display, |
3963 | const struct intel_watermark_params *display, |
3995 | const struct intel_watermark_params *cursor) |
3964 | const struct intel_watermark_params *cursor) |
3996 | { |
3965 | { |
3997 | DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", |
3966 | DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", |
3998 | display_wm, cursor_wm); |
3967 | display_wm, cursor_wm); |
3999 | 3968 | ||
4000 | if (display_wm > display->max_wm) { |
3969 | if (display_wm > display->max_wm) { |
4001 | DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", |
3970 | DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", |
4002 | display_wm, display->max_wm); |
3971 | display_wm, display->max_wm); |
4003 | return false; |
3972 | return false; |
4004 | } |
3973 | } |
4005 | 3974 | ||
4006 | if (cursor_wm > cursor->max_wm) { |
3975 | if (cursor_wm > cursor->max_wm) { |
4007 | DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", |
3976 | DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", |
4008 | cursor_wm, cursor->max_wm); |
3977 | cursor_wm, cursor->max_wm); |
4009 | return false; |
3978 | return false; |
4010 | } |
3979 | } |
4011 | 3980 | ||
4012 | if (!(display_wm || cursor_wm)) { |
3981 | if (!(display_wm || cursor_wm)) { |
4013 | DRM_DEBUG_KMS("SR latency is 0, disabling\n"); |
3982 | DRM_DEBUG_KMS("SR latency is 0, disabling\n"); |
4014 | return false; |
3983 | return false; |
4015 | } |
3984 | } |
4016 | 3985 | ||
4017 | return true; |
3986 | return true; |
4018 | } |
3987 | } |
4019 | 3988 | ||
/*
 * Compute the G4x self-refresh watermarks for the given plane.
 *
 * When latency_ns is zero both outputs are cleared and false is
 * returned; otherwise *display_wm / *cursor_wm are filled in and
 * validated with g4x_check_srwm().
 */
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	/* latency expressed in whole scanlines, rounded up */
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	/* assumes a 64-pixel-wide cursor — TODO confirm */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
4065 | 4034 | ||
/* true when exactly one plane bit is set in @mask */
#define single_plane_enabled(mask) is_power_of_2(mask)
4067 | 4036 | ||
/*
 * Recompute and program all G4x FIFO watermarks: plane A/B, cursor
 * A/B, and — when exactly one plane is active — the self-refresh
 * watermarks.  Self-refresh is enabled/disabled accordingly.
 */
static void g4x_update_wm(struct drm_device *dev)
{
	/* self-refresh fetch latency is much higher than normal */
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;	/* bitmask of active planes */

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	/* SR watermarks only apply in single-plane configurations */
	plane_sr = cursor_sr = 0;
	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	else
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4118 | 4087 | ||
/*
 * Update the i965 (Broadwater/Crestline) watermarks.  The per-plane
 * watermarks are programmed to fixed values of 8; only the self-
 * refresh plane/cursor watermarks are computed, and only when a
 * single CRTC is active.
 */
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;		/* fallback SR watermark when disabled */
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;	/* register field width */
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		/* cursor SR: assumes a 64-pixel-wide cursor — TODO confirm */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
4183 | 4152 | ||
4184 | static void i9xx_update_wm(struct drm_device *dev) |
4153 | static void i9xx_update_wm(struct drm_device *dev) |
4185 | { |
4154 | { |
4186 | struct drm_i915_private *dev_priv = dev->dev_private; |
4155 | struct drm_i915_private *dev_priv = dev->dev_private; |
4187 | const struct intel_watermark_params *wm_info; |
4156 | const struct intel_watermark_params *wm_info; |
4188 | uint32_t fwater_lo; |
4157 | uint32_t fwater_lo; |
4189 | uint32_t fwater_hi; |
4158 | uint32_t fwater_hi; |
4190 | int cwm, srwm = 1; |
4159 | int cwm, srwm = 1; |
4191 | int fifo_size; |
4160 | int fifo_size; |
4192 | int planea_wm, planeb_wm; |
4161 | int planea_wm, planeb_wm; |
4193 | struct drm_crtc *crtc, *enabled = NULL; |
4162 | struct drm_crtc *crtc, *enabled = NULL; |
4194 | 4163 | ||
4195 | if (IS_I945GM(dev)) |
4164 | if (IS_I945GM(dev)) |
4196 | wm_info = &i945_wm_info; |
4165 | wm_info = &i945_wm_info; |
4197 | else if (!IS_GEN2(dev)) |
4166 | else if (!IS_GEN2(dev)) |
4198 | wm_info = &i915_wm_info; |
4167 | wm_info = &i915_wm_info; |
4199 | else |
4168 | else |
4200 | wm_info = &i855_wm_info; |
4169 | wm_info = &i855_wm_info; |
4201 | 4170 | ||
4202 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
4171 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); |
4203 | crtc = intel_get_crtc_for_plane(dev, 0); |
4172 | crtc = intel_get_crtc_for_plane(dev, 0); |
4204 | if (crtc->enabled && crtc->fb) { |
4173 | if (crtc->enabled && crtc->fb) { |
4205 | planea_wm = intel_calculate_wm(crtc->mode.clock, |
4174 | planea_wm = intel_calculate_wm(crtc->mode.clock, |
4206 | wm_info, fifo_size, |
4175 | wm_info, fifo_size, |
4207 | crtc->fb->bits_per_pixel / 8, |
4176 | crtc->fb->bits_per_pixel / 8, |
4208 | latency_ns); |
4177 | latency_ns); |
4209 | enabled = crtc; |
4178 | enabled = crtc; |
4210 | } else |
4179 | } else |
4211 | planea_wm = fifo_size - wm_info->guard_size; |
4180 | planea_wm = fifo_size - wm_info->guard_size; |
4212 | 4181 | ||
4213 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); |
4182 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); |
4214 | crtc = intel_get_crtc_for_plane(dev, 1); |
4183 | crtc = intel_get_crtc_for_plane(dev, 1); |
4215 | if (crtc->enabled && crtc->fb) { |
4184 | if (crtc->enabled && crtc->fb) { |
4216 | planeb_wm = intel_calculate_wm(crtc->mode.clock, |
4185 | planeb_wm = intel_calculate_wm(crtc->mode.clock, |
4217 | wm_info, fifo_size, |
4186 | wm_info, fifo_size, |
4218 | crtc->fb->bits_per_pixel / 8, |
4187 | crtc->fb->bits_per_pixel / 8, |
4219 | latency_ns); |
4188 | latency_ns); |
4220 | if (enabled == NULL) |
4189 | if (enabled == NULL) |
4221 | enabled = crtc; |
4190 | enabled = crtc; |
4222 | else |
4191 | else |
4223 | enabled = NULL; |
4192 | enabled = NULL; |
4224 | } else |
4193 | } else |
4225 | planeb_wm = fifo_size - wm_info->guard_size; |
4194 | planeb_wm = fifo_size - wm_info->guard_size; |
4226 | 4195 | ||
4227 | DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); |
4196 | DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); |
4228 | 4197 | ||
4229 | /* |
4198 | /* |
4230 | * Overlay gets an aggressive default since video jitter is bad. |
4199 | * Overlay gets an aggressive default since video jitter is bad. |
4231 | */ |
4200 | */ |
4232 | cwm = 2; |
4201 | cwm = 2; |
4233 | 4202 | ||
4234 | /* Play safe and disable self-refresh before adjusting watermarks. */ |
4203 | /* Play safe and disable self-refresh before adjusting watermarks. */ |
4235 | if (IS_I945G(dev) || IS_I945GM(dev)) |
4204 | if (IS_I945G(dev) || IS_I945GM(dev)) |
4236 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); |
4205 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); |
4237 | else if (IS_I915GM(dev)) |
4206 | else if (IS_I915GM(dev)) |
4238 | I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); |
4207 | I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); |
4239 | 4208 | ||
4240 | /* Calc sr entries for one plane configs */ |
4209 | /* Calc sr entries for one plane configs */ |
4241 | if (HAS_FW_BLC(dev) && enabled) { |
4210 | if (HAS_FW_BLC(dev) && enabled) { |
4242 | /* self-refresh has much higher latency */ |
4211 | /* self-refresh has much higher latency */ |
4243 | static const int sr_latency_ns = 6000; |
4212 | static const int sr_latency_ns = 6000; |
4244 | int clock = enabled->mode.clock; |
4213 | int clock = enabled->mode.clock; |
4245 | int htotal = enabled->mode.htotal; |
4214 | int htotal = enabled->mode.htotal; |
4246 | int hdisplay = enabled->mode.hdisplay; |
4215 | int hdisplay = enabled->mode.hdisplay; |
4247 | int pixel_size = enabled->fb->bits_per_pixel / 8; |
4216 | int pixel_size = enabled->fb->bits_per_pixel / 8; |
4248 | unsigned long line_time_us; |
4217 | unsigned long line_time_us; |
4249 | int entries; |
4218 | int entries; |
4250 | 4219 | ||
4251 | line_time_us = (htotal * 1000) / clock; |
4220 | line_time_us = (htotal * 1000) / clock; |
4252 | 4221 | ||
4253 | /* Use ns/us then divide to preserve precision */ |
4222 | /* Use ns/us then divide to preserve precision */ |
4254 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
4223 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
4255 | pixel_size * hdisplay; |
4224 | pixel_size * hdisplay; |
4256 | entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); |
4225 | entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); |
4257 | DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); |
4226 | DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); |
4258 | srwm = wm_info->fifo_size - entries; |
4227 | srwm = wm_info->fifo_size - entries; |
4259 | if (srwm < 0) |
4228 | if (srwm < 0) |
4260 | srwm = 1; |
4229 | srwm = 1; |
4261 | 4230 | ||
4262 | if (IS_I945G(dev) || IS_I945GM(dev)) |
4231 | if (IS_I945G(dev) || IS_I945GM(dev)) |
4263 | I915_WRITE(FW_BLC_SELF, |
4232 | I915_WRITE(FW_BLC_SELF, |
4264 | FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); |
4233 | FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); |
4265 | else if (IS_I915GM(dev)) |
4234 | else if (IS_I915GM(dev)) |
4266 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); |
4235 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); |
4267 | } |
4236 | } |
4268 | 4237 | ||
4269 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
4238 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
4270 | planea_wm, planeb_wm, cwm, srwm); |
4239 | planea_wm, planeb_wm, cwm, srwm); |
4271 | 4240 | ||
4272 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); |
4241 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); |
4273 | fwater_hi = (cwm & 0x1f); |
4242 | fwater_hi = (cwm & 0x1f); |
4274 | 4243 | ||
4275 | /* Set request length to 8 cachelines per fetch */ |
4244 | /* Set request length to 8 cachelines per fetch */ |
4276 | fwater_lo = fwater_lo | (1 << 24) | (1 << 8); |
4245 | fwater_lo = fwater_lo | (1 << 24) | (1 << 8); |
4277 | fwater_hi = fwater_hi | (1 << 8); |
4246 | fwater_hi = fwater_hi | (1 << 8); |
4278 | 4247 | ||
4279 | I915_WRITE(FW_BLC, fwater_lo); |
4248 | I915_WRITE(FW_BLC, fwater_lo); |
4280 | I915_WRITE(FW_BLC2, fwater_hi); |
4249 | I915_WRITE(FW_BLC2, fwater_hi); |
4281 | 4250 | ||
4282 | if (HAS_FW_BLC(dev)) { |
4251 | if (HAS_FW_BLC(dev)) { |
4283 | if (enabled) { |
4252 | if (enabled) { |
4284 | if (IS_I945G(dev) || IS_I945GM(dev)) |
4253 | if (IS_I945G(dev) || IS_I945GM(dev)) |
4285 | I915_WRITE(FW_BLC_SELF, |
4254 | I915_WRITE(FW_BLC_SELF, |
4286 | FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); |
4255 | FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); |
4287 | else if (IS_I915GM(dev)) |
4256 | else if (IS_I915GM(dev)) |
4288 | I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); |
4257 | I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); |
4289 | DRM_DEBUG_KMS("memory self refresh enabled\n"); |
4258 | DRM_DEBUG_KMS("memory self refresh enabled\n"); |
4290 | } else |
4259 | } else |
4291 | DRM_DEBUG_KMS("memory self refresh disabled\n"); |
4260 | DRM_DEBUG_KMS("memory self refresh disabled\n"); |
4292 | } |
4261 | } |
4293 | } |
4262 | } |
4294 | 4263 | ||
4295 | static void i830_update_wm(struct drm_device *dev) |
4264 | static void i830_update_wm(struct drm_device *dev) |
4296 | { |
4265 | { |
4297 | struct drm_i915_private *dev_priv = dev->dev_private; |
4266 | struct drm_i915_private *dev_priv = dev->dev_private; |
4298 | struct drm_crtc *crtc; |
4267 | struct drm_crtc *crtc; |
4299 | uint32_t fwater_lo; |
4268 | uint32_t fwater_lo; |
4300 | int planea_wm; |
4269 | int planea_wm; |
4301 | 4270 | ||
4302 | crtc = single_enabled_crtc(dev); |
4271 | crtc = single_enabled_crtc(dev); |
4303 | if (crtc == NULL) |
4272 | if (crtc == NULL) |
4304 | return; |
4273 | return; |
4305 | 4274 | ||
4306 | planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, |
4275 | planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, |
4307 | dev_priv->display.get_fifo_size(dev, 0), |
4276 | dev_priv->display.get_fifo_size(dev, 0), |
4308 | crtc->fb->bits_per_pixel / 8, |
4277 | crtc->fb->bits_per_pixel / 8, |
4309 | latency_ns); |
4278 | latency_ns); |
4310 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
4279 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; |
4311 | fwater_lo |= (3<<8) | planea_wm; |
4280 | fwater_lo |= (3<<8) | planea_wm; |
4312 | 4281 | ||
4313 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); |
4282 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); |
4314 | 4283 | ||
4315 | I915_WRITE(FW_BLC, fwater_lo); |
4284 | I915_WRITE(FW_BLC, fwater_lo); |
4316 | } |
4285 | } |
4317 | 4286 | ||
4318 | #define ILK_LP0_PLANE_LATENCY 700 |
4287 | #define ILK_LP0_PLANE_LATENCY 700 |
4319 | #define ILK_LP0_CURSOR_LATENCY 1300 |
4288 | #define ILK_LP0_CURSOR_LATENCY 1300 |
4320 | 4289 | ||
4321 | /* |
4290 | /* |
4322 | * Check the wm result. |
4291 | * Check the wm result. |
4323 | * |
4292 | * |
4324 | * If any calculated watermark values is larger than the maximum value that |
4293 | * If any calculated watermark values is larger than the maximum value that |
4325 | * can be programmed into the associated watermark register, that watermark |
4294 | * can be programmed into the associated watermark register, that watermark |
4326 | * must be disabled. |
4295 | * must be disabled. |
4327 | */ |
4296 | */ |
4328 | static bool ironlake_check_srwm(struct drm_device *dev, int level, |
4297 | static bool ironlake_check_srwm(struct drm_device *dev, int level, |
4329 | int fbc_wm, int display_wm, int cursor_wm, |
4298 | int fbc_wm, int display_wm, int cursor_wm, |
4330 | const struct intel_watermark_params *display, |
4299 | const struct intel_watermark_params *display, |
4331 | const struct intel_watermark_params *cursor) |
4300 | const struct intel_watermark_params *cursor) |
4332 | { |
4301 | { |
4333 | struct drm_i915_private *dev_priv = dev->dev_private; |
4302 | struct drm_i915_private *dev_priv = dev->dev_private; |
4334 | 4303 | ||
4335 | DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," |
4304 | DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," |
4336 | " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); |
4305 | " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); |
4337 | 4306 | ||
4338 | if (fbc_wm > SNB_FBC_MAX_SRWM) { |
4307 | if (fbc_wm > SNB_FBC_MAX_SRWM) { |
4339 | DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", |
4308 | DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", |
4340 | fbc_wm, SNB_FBC_MAX_SRWM, level); |
4309 | fbc_wm, SNB_FBC_MAX_SRWM, level); |
4341 | 4310 | ||
4342 | /* fbc has it's own way to disable FBC WM */ |
4311 | /* fbc has it's own way to disable FBC WM */ |
4343 | I915_WRITE(DISP_ARB_CTL, |
4312 | I915_WRITE(DISP_ARB_CTL, |
4344 | I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); |
4313 | I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); |
4345 | return false; |
4314 | return false; |
4346 | } |
4315 | } |
4347 | 4316 | ||
4348 | if (display_wm > display->max_wm) { |
4317 | if (display_wm > display->max_wm) { |
4349 | DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", |
4318 | DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", |
4350 | display_wm, SNB_DISPLAY_MAX_SRWM, level); |
4319 | display_wm, SNB_DISPLAY_MAX_SRWM, level); |
4351 | return false; |
4320 | return false; |
4352 | } |
4321 | } |
4353 | 4322 | ||
4354 | if (cursor_wm > cursor->max_wm) { |
4323 | if (cursor_wm > cursor->max_wm) { |
4355 | DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", |
4324 | DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", |
4356 | cursor_wm, SNB_CURSOR_MAX_SRWM, level); |
4325 | cursor_wm, SNB_CURSOR_MAX_SRWM, level); |
4357 | return false; |
4326 | return false; |
4358 | } |
4327 | } |
4359 | 4328 | ||
4360 | if (!(fbc_wm || display_wm || cursor_wm)) { |
4329 | if (!(fbc_wm || display_wm || cursor_wm)) { |
4361 | DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); |
4330 | DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); |
4362 | return false; |
4331 | return false; |
4363 | } |
4332 | } |
4364 | 4333 | ||
4365 | return true; |
4334 | return true; |
4366 | } |
4335 | } |
4367 | 4336 | ||
4368 | /* |
4337 | /* |
4369 | * Compute watermark values of WM[1-3], |
4338 | * Compute watermark values of WM[1-3], |
4370 | */ |
4339 | */ |
4371 | static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, |
4340 | static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, |
4372 | int latency_ns, |
4341 | int latency_ns, |
4373 | const struct intel_watermark_params *display, |
4342 | const struct intel_watermark_params *display, |
4374 | const struct intel_watermark_params *cursor, |
4343 | const struct intel_watermark_params *cursor, |
4375 | int *fbc_wm, int *display_wm, int *cursor_wm) |
4344 | int *fbc_wm, int *display_wm, int *cursor_wm) |
4376 | { |
4345 | { |
4377 | struct drm_crtc *crtc; |
4346 | struct drm_crtc *crtc; |
4378 | unsigned long line_time_us; |
4347 | unsigned long line_time_us; |
4379 | int hdisplay, htotal, pixel_size, clock; |
4348 | int hdisplay, htotal, pixel_size, clock; |
4380 | int line_count, line_size; |
4349 | int line_count, line_size; |
4381 | int small, large; |
4350 | int small, large; |
4382 | int entries; |
4351 | int entries; |
4383 | 4352 | ||
4384 | if (!latency_ns) { |
4353 | if (!latency_ns) { |
4385 | *fbc_wm = *display_wm = *cursor_wm = 0; |
4354 | *fbc_wm = *display_wm = *cursor_wm = 0; |
4386 | return false; |
4355 | return false; |
4387 | } |
4356 | } |
4388 | 4357 | ||
4389 | crtc = intel_get_crtc_for_plane(dev, plane); |
4358 | crtc = intel_get_crtc_for_plane(dev, plane); |
4390 | hdisplay = crtc->mode.hdisplay; |
4359 | hdisplay = crtc->mode.hdisplay; |
4391 | htotal = crtc->mode.htotal; |
4360 | htotal = crtc->mode.htotal; |
4392 | clock = crtc->mode.clock; |
4361 | clock = crtc->mode.clock; |
4393 | pixel_size = crtc->fb->bits_per_pixel / 8; |
4362 | pixel_size = crtc->fb->bits_per_pixel / 8; |
4394 | 4363 | ||
4395 | line_time_us = (htotal * 1000) / clock; |
4364 | line_time_us = (htotal * 1000) / clock; |
4396 | line_count = (latency_ns / line_time_us + 1000) / 1000; |
4365 | line_count = (latency_ns / line_time_us + 1000) / 1000; |
4397 | line_size = hdisplay * pixel_size; |
4366 | line_size = hdisplay * pixel_size; |
4398 | 4367 | ||
4399 | /* Use the minimum of the small and large buffer method for primary */ |
4368 | /* Use the minimum of the small and large buffer method for primary */ |
4400 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; |
4369 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; |
4401 | large = line_count * line_size; |
4370 | large = line_count * line_size; |
4402 | 4371 | ||
4403 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); |
4372 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); |
4404 | *display_wm = entries + display->guard_size; |
4373 | *display_wm = entries + display->guard_size; |
4405 | 4374 | ||
4406 | /* |
4375 | /* |
4407 | * Spec says: |
4376 | * Spec says: |
4408 | * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 |
4377 | * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 |
4409 | */ |
4378 | */ |
4410 | *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; |
4379 | *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; |
4411 | 4380 | ||
4412 | /* calculate the self-refresh watermark for display cursor */ |
4381 | /* calculate the self-refresh watermark for display cursor */ |
4413 | entries = line_count * pixel_size * 64; |
4382 | entries = line_count * pixel_size * 64; |
4414 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); |
4383 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); |
4415 | *cursor_wm = entries + cursor->guard_size; |
4384 | *cursor_wm = entries + cursor->guard_size; |
4416 | 4385 | ||
4417 | return ironlake_check_srwm(dev, level, |
4386 | return ironlake_check_srwm(dev, level, |
4418 | *fbc_wm, *display_wm, *cursor_wm, |
4387 | *fbc_wm, *display_wm, *cursor_wm, |
4419 | display, cursor); |
4388 | display, cursor); |
4420 | } |
4389 | } |
4421 | 4390 | ||
4422 | static void ironlake_update_wm(struct drm_device *dev) |
4391 | static void ironlake_update_wm(struct drm_device *dev) |
4423 | { |
4392 | { |
4424 | struct drm_i915_private *dev_priv = dev->dev_private; |
4393 | struct drm_i915_private *dev_priv = dev->dev_private; |
4425 | int fbc_wm, plane_wm, cursor_wm; |
4394 | int fbc_wm, plane_wm, cursor_wm; |
4426 | unsigned int enabled; |
4395 | unsigned int enabled; |
4427 | 4396 | ||
4428 | enabled = 0; |
4397 | enabled = 0; |
4429 | if (g4x_compute_wm0(dev, 0, |
4398 | if (g4x_compute_wm0(dev, 0, |
4430 | &ironlake_display_wm_info, |
4399 | &ironlake_display_wm_info, |
4431 | ILK_LP0_PLANE_LATENCY, |
4400 | ILK_LP0_PLANE_LATENCY, |
4432 | &ironlake_cursor_wm_info, |
4401 | &ironlake_cursor_wm_info, |
4433 | ILK_LP0_CURSOR_LATENCY, |
4402 | ILK_LP0_CURSOR_LATENCY, |
4434 | &plane_wm, &cursor_wm)) { |
4403 | &plane_wm, &cursor_wm)) { |
4435 | I915_WRITE(WM0_PIPEA_ILK, |
4404 | I915_WRITE(WM0_PIPEA_ILK, |
4436 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
4405 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
4437 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
4406 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
4438 | " plane %d, " "cursor: %d\n", |
4407 | " plane %d, " "cursor: %d\n", |
4439 | plane_wm, cursor_wm); |
4408 | plane_wm, cursor_wm); |
4440 | enabled |= 1; |
4409 | enabled |= 1; |
4441 | } |
4410 | } |
4442 | 4411 | ||
4443 | if (g4x_compute_wm0(dev, 1, |
4412 | if (g4x_compute_wm0(dev, 1, |
4444 | &ironlake_display_wm_info, |
4413 | &ironlake_display_wm_info, |
4445 | ILK_LP0_PLANE_LATENCY, |
4414 | ILK_LP0_PLANE_LATENCY, |
4446 | &ironlake_cursor_wm_info, |
4415 | &ironlake_cursor_wm_info, |
4447 | ILK_LP0_CURSOR_LATENCY, |
4416 | ILK_LP0_CURSOR_LATENCY, |
4448 | &plane_wm, &cursor_wm)) { |
4417 | &plane_wm, &cursor_wm)) { |
4449 | I915_WRITE(WM0_PIPEB_ILK, |
4418 | I915_WRITE(WM0_PIPEB_ILK, |
4450 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
4419 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
4451 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
4420 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
4452 | " plane %d, cursor: %d\n", |
4421 | " plane %d, cursor: %d\n", |
4453 | plane_wm, cursor_wm); |
4422 | plane_wm, cursor_wm); |
4454 | enabled |= 2; |
4423 | enabled |= 2; |
4455 | } |
4424 | } |
4456 | 4425 | ||
4457 | /* |
4426 | /* |
4458 | * Calculate and update the self-refresh watermark only when one |
4427 | * Calculate and update the self-refresh watermark only when one |
4459 | * display plane is used. |
4428 | * display plane is used. |
4460 | */ |
4429 | */ |
4461 | I915_WRITE(WM3_LP_ILK, 0); |
4430 | I915_WRITE(WM3_LP_ILK, 0); |
4462 | I915_WRITE(WM2_LP_ILK, 0); |
4431 | I915_WRITE(WM2_LP_ILK, 0); |
4463 | I915_WRITE(WM1_LP_ILK, 0); |
4432 | I915_WRITE(WM1_LP_ILK, 0); |
4464 | 4433 | ||
4465 | if (!single_plane_enabled(enabled)) |
4434 | if (!single_plane_enabled(enabled)) |
4466 | return; |
4435 | return; |
4467 | enabled = ffs(enabled) - 1; |
4436 | enabled = ffs(enabled) - 1; |
4468 | 4437 | ||
4469 | /* WM1 */ |
4438 | /* WM1 */ |
4470 | if (!ironlake_compute_srwm(dev, 1, enabled, |
4439 | if (!ironlake_compute_srwm(dev, 1, enabled, |
4471 | ILK_READ_WM1_LATENCY() * 500, |
4440 | ILK_READ_WM1_LATENCY() * 500, |
4472 | &ironlake_display_srwm_info, |
4441 | &ironlake_display_srwm_info, |
4473 | &ironlake_cursor_srwm_info, |
4442 | &ironlake_cursor_srwm_info, |
4474 | &fbc_wm, &plane_wm, &cursor_wm)) |
4443 | &fbc_wm, &plane_wm, &cursor_wm)) |
4475 | return; |
4444 | return; |
4476 | 4445 | ||
4477 | I915_WRITE(WM1_LP_ILK, |
4446 | I915_WRITE(WM1_LP_ILK, |
4478 | WM1_LP_SR_EN | |
4447 | WM1_LP_SR_EN | |
4479 | (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
4448 | (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
4480 | (fbc_wm << WM1_LP_FBC_SHIFT) | |
4449 | (fbc_wm << WM1_LP_FBC_SHIFT) | |
4481 | (plane_wm << WM1_LP_SR_SHIFT) | |
4450 | (plane_wm << WM1_LP_SR_SHIFT) | |
4482 | cursor_wm); |
4451 | cursor_wm); |
4483 | 4452 | ||
4484 | /* WM2 */ |
4453 | /* WM2 */ |
4485 | if (!ironlake_compute_srwm(dev, 2, enabled, |
4454 | if (!ironlake_compute_srwm(dev, 2, enabled, |
4486 | ILK_READ_WM2_LATENCY() * 500, |
4455 | ILK_READ_WM2_LATENCY() * 500, |
4487 | &ironlake_display_srwm_info, |
4456 | &ironlake_display_srwm_info, |
4488 | &ironlake_cursor_srwm_info, |
4457 | &ironlake_cursor_srwm_info, |
4489 | &fbc_wm, &plane_wm, &cursor_wm)) |
4458 | &fbc_wm, &plane_wm, &cursor_wm)) |
4490 | return; |
4459 | return; |
4491 | 4460 | ||
4492 | I915_WRITE(WM2_LP_ILK, |
4461 | I915_WRITE(WM2_LP_ILK, |
4493 | WM2_LP_EN | |
4462 | WM2_LP_EN | |
4494 | (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
4463 | (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
4495 | (fbc_wm << WM1_LP_FBC_SHIFT) | |
4464 | (fbc_wm << WM1_LP_FBC_SHIFT) | |
4496 | (plane_wm << WM1_LP_SR_SHIFT) | |
4465 | (plane_wm << WM1_LP_SR_SHIFT) | |
4497 | cursor_wm); |
4466 | cursor_wm); |
4498 | 4467 | ||
4499 | /* |
4468 | /* |
4500 | * WM3 is unsupported on ILK, probably because we don't have latency |
4469 | * WM3 is unsupported on ILK, probably because we don't have latency |
4501 | * data for that power state |
4470 | * data for that power state |
4502 | */ |
4471 | */ |
4503 | } |
4472 | } |
4504 | 4473 | ||
4505 | void sandybridge_update_wm(struct drm_device *dev) |
4474 | void sandybridge_update_wm(struct drm_device *dev) |
4506 | { |
4475 | { |
4507 | struct drm_i915_private *dev_priv = dev->dev_private; |
4476 | struct drm_i915_private *dev_priv = dev->dev_private; |
4508 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
4477 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
4509 | int fbc_wm, plane_wm, cursor_wm; |
4478 | int fbc_wm, plane_wm, cursor_wm; |
4510 | unsigned int enabled; |
4479 | unsigned int enabled; |
4511 | 4480 | ||
4512 | enabled = 0; |
4481 | enabled = 0; |
4513 | if (g4x_compute_wm0(dev, 0, |
4482 | if (g4x_compute_wm0(dev, 0, |
4514 | &sandybridge_display_wm_info, latency, |
4483 | &sandybridge_display_wm_info, latency, |
4515 | &sandybridge_cursor_wm_info, latency, |
4484 | &sandybridge_cursor_wm_info, latency, |
4516 | &plane_wm, &cursor_wm)) { |
4485 | &plane_wm, &cursor_wm)) { |
4517 | I915_WRITE(WM0_PIPEA_ILK, |
4486 | I915_WRITE(WM0_PIPEA_ILK, |
4518 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
4487 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
4519 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
4488 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" |
4520 | " plane %d, " "cursor: %d\n", |
4489 | " plane %d, " "cursor: %d\n", |
4521 | plane_wm, cursor_wm); |
4490 | plane_wm, cursor_wm); |
4522 | enabled |= 1; |
4491 | enabled |= 1; |
4523 | } |
4492 | } |
4524 | 4493 | ||
4525 | if (g4x_compute_wm0(dev, 1, |
4494 | if (g4x_compute_wm0(dev, 1, |
4526 | &sandybridge_display_wm_info, latency, |
4495 | &sandybridge_display_wm_info, latency, |
4527 | &sandybridge_cursor_wm_info, latency, |
4496 | &sandybridge_cursor_wm_info, latency, |
4528 | &plane_wm, &cursor_wm)) { |
4497 | &plane_wm, &cursor_wm)) { |
4529 | I915_WRITE(WM0_PIPEB_ILK, |
4498 | I915_WRITE(WM0_PIPEB_ILK, |
4530 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
4499 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
4531 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
4500 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" |
4532 | " plane %d, cursor: %d\n", |
4501 | " plane %d, cursor: %d\n", |
4533 | plane_wm, cursor_wm); |
4502 | plane_wm, cursor_wm); |
4534 | enabled |= 2; |
4503 | enabled |= 2; |
4535 | } |
4504 | } |
4536 | 4505 | ||
4537 | /* IVB has 3 pipes */ |
4506 | /* IVB has 3 pipes */ |
4538 | if (IS_IVYBRIDGE(dev) && |
4507 | if (IS_IVYBRIDGE(dev) && |
4539 | g4x_compute_wm0(dev, 2, |
4508 | g4x_compute_wm0(dev, 2, |
4540 | &sandybridge_display_wm_info, latency, |
4509 | &sandybridge_display_wm_info, latency, |
4541 | &sandybridge_cursor_wm_info, latency, |
4510 | &sandybridge_cursor_wm_info, latency, |
4542 | &plane_wm, &cursor_wm)) { |
4511 | &plane_wm, &cursor_wm)) { |
4543 | I915_WRITE(WM0_PIPEC_IVB, |
4512 | I915_WRITE(WM0_PIPEC_IVB, |
4544 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
4513 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); |
4545 | DRM_DEBUG_KMS("FIFO watermarks For pipe C -" |
4514 | DRM_DEBUG_KMS("FIFO watermarks For pipe C -" |
4546 | " plane %d, cursor: %d\n", |
4515 | " plane %d, cursor: %d\n", |
4547 | plane_wm, cursor_wm); |
4516 | plane_wm, cursor_wm); |
4548 | enabled |= 3; |
4517 | enabled |= 3; |
4549 | } |
4518 | } |
4550 | 4519 | ||
4551 | /* |
4520 | /* |
4552 | * Calculate and update the self-refresh watermark only when one |
4521 | * Calculate and update the self-refresh watermark only when one |
4553 | * display plane is used. |
4522 | * display plane is used. |
4554 | * |
4523 | * |
4555 | * SNB support 3 levels of watermark. |
4524 | * SNB support 3 levels of watermark. |
4556 | * |
4525 | * |
4557 | * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, |
4526 | * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, |
4558 | * and disabled in the descending order |
4527 | * and disabled in the descending order |
4559 | * |
4528 | * |
4560 | */ |
4529 | */ |
4561 | I915_WRITE(WM3_LP_ILK, 0); |
4530 | I915_WRITE(WM3_LP_ILK, 0); |
4562 | I915_WRITE(WM2_LP_ILK, 0); |
4531 | I915_WRITE(WM2_LP_ILK, 0); |
4563 | I915_WRITE(WM1_LP_ILK, 0); |
4532 | I915_WRITE(WM1_LP_ILK, 0); |
4564 | 4533 | ||
4565 | if (!single_plane_enabled(enabled) || |
4534 | if (!single_plane_enabled(enabled) || |
4566 | dev_priv->sprite_scaling_enabled) |
4535 | dev_priv->sprite_scaling_enabled) |
4567 | return; |
4536 | return; |
4568 | enabled = ffs(enabled) - 1; |
4537 | enabled = ffs(enabled) - 1; |
4569 | 4538 | ||
4570 | /* WM1 */ |
4539 | /* WM1 */ |
4571 | if (!ironlake_compute_srwm(dev, 1, enabled, |
4540 | if (!ironlake_compute_srwm(dev, 1, enabled, |
4572 | SNB_READ_WM1_LATENCY() * 500, |
4541 | SNB_READ_WM1_LATENCY() * 500, |
4573 | &sandybridge_display_srwm_info, |
4542 | &sandybridge_display_srwm_info, |
4574 | &sandybridge_cursor_srwm_info, |
4543 | &sandybridge_cursor_srwm_info, |
4575 | &fbc_wm, &plane_wm, &cursor_wm)) |
4544 | &fbc_wm, &plane_wm, &cursor_wm)) |
4576 | return; |
4545 | return; |
4577 | 4546 | ||
4578 | I915_WRITE(WM1_LP_ILK, |
4547 | I915_WRITE(WM1_LP_ILK, |
4579 | WM1_LP_SR_EN | |
4548 | WM1_LP_SR_EN | |
4580 | (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
4549 | (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
4581 | (fbc_wm << WM1_LP_FBC_SHIFT) | |
4550 | (fbc_wm << WM1_LP_FBC_SHIFT) | |
4582 | (plane_wm << WM1_LP_SR_SHIFT) | |
4551 | (plane_wm << WM1_LP_SR_SHIFT) | |
4583 | cursor_wm); |
4552 | cursor_wm); |
4584 | 4553 | ||
4585 | /* WM2 */ |
4554 | /* WM2 */ |
4586 | if (!ironlake_compute_srwm(dev, 2, enabled, |
4555 | if (!ironlake_compute_srwm(dev, 2, enabled, |
4587 | SNB_READ_WM2_LATENCY() * 500, |
4556 | SNB_READ_WM2_LATENCY() * 500, |
4588 | &sandybridge_display_srwm_info, |
4557 | &sandybridge_display_srwm_info, |
4589 | &sandybridge_cursor_srwm_info, |
4558 | &sandybridge_cursor_srwm_info, |
4590 | &fbc_wm, &plane_wm, &cursor_wm)) |
4559 | &fbc_wm, &plane_wm, &cursor_wm)) |
4591 | return; |
4560 | return; |
4592 | 4561 | ||
4593 | I915_WRITE(WM2_LP_ILK, |
4562 | I915_WRITE(WM2_LP_ILK, |
4594 | WM2_LP_EN | |
4563 | WM2_LP_EN | |
4595 | (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
4564 | (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
4596 | (fbc_wm << WM1_LP_FBC_SHIFT) | |
4565 | (fbc_wm << WM1_LP_FBC_SHIFT) | |
4597 | (plane_wm << WM1_LP_SR_SHIFT) | |
4566 | (plane_wm << WM1_LP_SR_SHIFT) | |
4598 | cursor_wm); |
4567 | cursor_wm); |
4599 | 4568 | ||
4600 | /* WM3 */ |
4569 | /* WM3 */ |
4601 | if (!ironlake_compute_srwm(dev, 3, enabled, |
4570 | if (!ironlake_compute_srwm(dev, 3, enabled, |
4602 | SNB_READ_WM3_LATENCY() * 500, |
4571 | SNB_READ_WM3_LATENCY() * 500, |
4603 | &sandybridge_display_srwm_info, |
4572 | &sandybridge_display_srwm_info, |
4604 | &sandybridge_cursor_srwm_info, |
4573 | &sandybridge_cursor_srwm_info, |
4605 | &fbc_wm, &plane_wm, &cursor_wm)) |
4574 | &fbc_wm, &plane_wm, &cursor_wm)) |
4606 | return; |
4575 | return; |
4607 | 4576 | ||
4608 | I915_WRITE(WM3_LP_ILK, |
4577 | I915_WRITE(WM3_LP_ILK, |
4609 | WM3_LP_EN | |
4578 | WM3_LP_EN | |
4610 | (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
4579 | (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | |
4611 | (fbc_wm << WM1_LP_FBC_SHIFT) | |
4580 | (fbc_wm << WM1_LP_FBC_SHIFT) | |
4612 | (plane_wm << WM1_LP_SR_SHIFT) | |
4581 | (plane_wm << WM1_LP_SR_SHIFT) | |
4613 | cursor_wm); |
4582 | cursor_wm); |
4614 | } |
4583 | } |
4615 | 4584 | ||
4616 | static bool |
4585 | static bool |
4617 | sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, |
4586 | sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, |
4618 | uint32_t sprite_width, int pixel_size, |
4587 | uint32_t sprite_width, int pixel_size, |
4619 | const struct intel_watermark_params *display, |
4588 | const struct intel_watermark_params *display, |
4620 | int display_latency_ns, int *sprite_wm) |
4589 | int display_latency_ns, int *sprite_wm) |
4621 | { |
4590 | { |
4622 | struct drm_crtc *crtc; |
4591 | struct drm_crtc *crtc; |
4623 | int clock; |
4592 | int clock; |
4624 | int entries, tlb_miss; |
4593 | int entries, tlb_miss; |
4625 | 4594 | ||
4626 | crtc = intel_get_crtc_for_plane(dev, plane); |
4595 | crtc = intel_get_crtc_for_plane(dev, plane); |
4627 | if (crtc->fb == NULL || !crtc->enabled) { |
4596 | if (crtc->fb == NULL || !crtc->enabled) { |
4628 | *sprite_wm = display->guard_size; |
4597 | *sprite_wm = display->guard_size; |
4629 | return false; |
4598 | return false; |
4630 | } |
4599 | } |
4631 | 4600 | ||
4632 | clock = crtc->mode.clock; |
4601 | clock = crtc->mode.clock; |
4633 | 4602 | ||
4634 | /* Use the small buffer method to calculate the sprite watermark */ |
4603 | /* Use the small buffer method to calculate the sprite watermark */ |
4635 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; |
4604 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; |
4636 | tlb_miss = display->fifo_size*display->cacheline_size - |
4605 | tlb_miss = display->fifo_size*display->cacheline_size - |
4637 | sprite_width * 8; |
4606 | sprite_width * 8; |
4638 | if (tlb_miss > 0) |
4607 | if (tlb_miss > 0) |
4639 | entries += tlb_miss; |
4608 | entries += tlb_miss; |
4640 | entries = DIV_ROUND_UP(entries, display->cacheline_size); |
4609 | entries = DIV_ROUND_UP(entries, display->cacheline_size); |
4641 | *sprite_wm = entries + display->guard_size; |
4610 | *sprite_wm = entries + display->guard_size; |
4642 | if (*sprite_wm > (int)display->max_wm) |
4611 | if (*sprite_wm > (int)display->max_wm) |
4643 | *sprite_wm = display->max_wm; |
4612 | *sprite_wm = display->max_wm; |
4644 | 4613 | ||
4645 | return true; |
4614 | return true; |
4646 | } |
4615 | } |
4647 | 4616 | ||
4648 | static bool |
4617 | static bool |
4649 | sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, |
4618 | sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, |
4650 | uint32_t sprite_width, int pixel_size, |
4619 | uint32_t sprite_width, int pixel_size, |
4651 | const struct intel_watermark_params *display, |
4620 | const struct intel_watermark_params *display, |
4652 | int latency_ns, int *sprite_wm) |
4621 | int latency_ns, int *sprite_wm) |
4653 | { |
4622 | { |
4654 | struct drm_crtc *crtc; |
4623 | struct drm_crtc *crtc; |
4655 | unsigned long line_time_us; |
4624 | unsigned long line_time_us; |
4656 | int clock; |
4625 | int clock; |
4657 | int line_count, line_size; |
4626 | int line_count, line_size; |
4658 | int small, large; |
4627 | int small, large; |
4659 | int entries; |
4628 | int entries; |
4660 | 4629 | ||
4661 | if (!latency_ns) { |
4630 | if (!latency_ns) { |
4662 | *sprite_wm = 0; |
4631 | *sprite_wm = 0; |
4663 | return false; |
4632 | return false; |
4664 | } |
4633 | } |
4665 | 4634 | ||
4666 | crtc = intel_get_crtc_for_plane(dev, plane); |
4635 | crtc = intel_get_crtc_for_plane(dev, plane); |
4667 | clock = crtc->mode.clock; |
4636 | clock = crtc->mode.clock; |
4668 | 4637 | ||
4669 | line_time_us = (sprite_width * 1000) / clock; |
4638 | line_time_us = (sprite_width * 1000) / clock; |
4670 | line_count = (latency_ns / line_time_us + 1000) / 1000; |
4639 | line_count = (latency_ns / line_time_us + 1000) / 1000; |
4671 | line_size = sprite_width * pixel_size; |
4640 | line_size = sprite_width * pixel_size; |
4672 | 4641 | ||
4673 | /* Use the minimum of the small and large buffer method for primary */ |
4642 | /* Use the minimum of the small and large buffer method for primary */ |
4674 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; |
4643 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; |
4675 | large = line_count * line_size; |
4644 | large = line_count * line_size; |
4676 | 4645 | ||
4677 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); |
4646 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); |
4678 | *sprite_wm = entries + display->guard_size; |
4647 | *sprite_wm = entries + display->guard_size; |
4679 | 4648 | ||
4680 | return *sprite_wm > 0x3ff ? false : true; |
4649 | return *sprite_wm > 0x3ff ? false : true; |
4681 | } |
4650 | } |
4682 | 4651 | ||
4683 | static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, |
4652 | static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, |
4684 | uint32_t sprite_width, int pixel_size) |
4653 | uint32_t sprite_width, int pixel_size) |
4685 | { |
4654 | { |
4686 | struct drm_i915_private *dev_priv = dev->dev_private; |
4655 | struct drm_i915_private *dev_priv = dev->dev_private; |
4687 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
4656 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
4688 | int sprite_wm, reg; |
4657 | int sprite_wm, reg; |
4689 | int ret; |
4658 | int ret; |
4690 | 4659 | ||
4691 | switch (pipe) { |
4660 | switch (pipe) { |
4692 | case 0: |
4661 | case 0: |
4693 | reg = WM0_PIPEA_ILK; |
4662 | reg = WM0_PIPEA_ILK; |
4694 | break; |
4663 | break; |
4695 | case 1: |
4664 | case 1: |
4696 | reg = WM0_PIPEB_ILK; |
4665 | reg = WM0_PIPEB_ILK; |
4697 | break; |
4666 | break; |
4698 | case 2: |
4667 | case 2: |
4699 | reg = WM0_PIPEC_IVB; |
4668 | reg = WM0_PIPEC_IVB; |
4700 | break; |
4669 | break; |
4701 | default: |
4670 | default: |
4702 | return; /* bad pipe */ |
4671 | return; /* bad pipe */ |
4703 | } |
4672 | } |
4704 | 4673 | ||
4705 | ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size, |
4674 | ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size, |
4706 | &sandybridge_display_wm_info, |
4675 | &sandybridge_display_wm_info, |
4707 | latency, &sprite_wm); |
4676 | latency, &sprite_wm); |
4708 | if (!ret) { |
4677 | if (!ret) { |
4709 | DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n", |
4678 | DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n", |
4710 | pipe); |
4679 | pipe); |
4711 | return; |
4680 | return; |
4712 | } |
4681 | } |
4713 | 4682 | ||
4714 | I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); |
4683 | I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); |
4715 | DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); |
4684 | DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); |
4716 | 4685 | ||
4717 | 4686 | ||
4718 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, |
4687 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, |
4719 | pixel_size, |
4688 | pixel_size, |
4720 | &sandybridge_display_srwm_info, |
4689 | &sandybridge_display_srwm_info, |
4721 | SNB_READ_WM1_LATENCY() * 500, |
4690 | SNB_READ_WM1_LATENCY() * 500, |
4722 | &sprite_wm); |
4691 | &sprite_wm); |
4723 | if (!ret) { |
4692 | if (!ret) { |
4724 | DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n", |
4693 | DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n", |
4725 | pipe); |
4694 | pipe); |
4726 | return; |
4695 | return; |
4727 | } |
4696 | } |
4728 | I915_WRITE(WM1S_LP_ILK, sprite_wm); |
4697 | I915_WRITE(WM1S_LP_ILK, sprite_wm); |
4729 | 4698 | ||
4730 | /* Only IVB has two more LP watermarks for sprite */ |
4699 | /* Only IVB has two more LP watermarks for sprite */ |
4731 | if (!IS_IVYBRIDGE(dev)) |
4700 | if (!IS_IVYBRIDGE(dev)) |
4732 | return; |
4701 | return; |
4733 | 4702 | ||
4734 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, |
4703 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, |
4735 | pixel_size, |
4704 | pixel_size, |
4736 | &sandybridge_display_srwm_info, |
4705 | &sandybridge_display_srwm_info, |
4737 | SNB_READ_WM2_LATENCY() * 500, |
4706 | SNB_READ_WM2_LATENCY() * 500, |
4738 | &sprite_wm); |
4707 | &sprite_wm); |
4739 | if (!ret) { |
4708 | if (!ret) { |
4740 | DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n", |
4709 | DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n", |
4741 | pipe); |
4710 | pipe); |
4742 | return; |
4711 | return; |
4743 | } |
4712 | } |
4744 | I915_WRITE(WM2S_LP_IVB, sprite_wm); |
4713 | I915_WRITE(WM2S_LP_IVB, sprite_wm); |
4745 | 4714 | ||
4746 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, |
4715 | ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, |
4747 | pixel_size, |
4716 | pixel_size, |
4748 | &sandybridge_display_srwm_info, |
4717 | &sandybridge_display_srwm_info, |
4749 | SNB_READ_WM3_LATENCY() * 500, |
4718 | SNB_READ_WM3_LATENCY() * 500, |
4750 | &sprite_wm); |
4719 | &sprite_wm); |
4751 | if (!ret) { |
4720 | if (!ret) { |
4752 | DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n", |
4721 | DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n", |
4753 | pipe); |
4722 | pipe); |
4754 | return; |
4723 | return; |
4755 | } |
4724 | } |
4756 | I915_WRITE(WM3S_LP_IVB, sprite_wm); |
4725 | I915_WRITE(WM3S_LP_IVB, sprite_wm); |
4757 | } |
4726 | } |
4758 | 4727 | ||
4759 | /** |
4728 | /** |
4760 | * intel_update_watermarks - update FIFO watermark values based on current modes |
4729 | * intel_update_watermarks - update FIFO watermark values based on current modes |
4761 | * |
4730 | * |
4762 | * Calculate watermark values for the various WM regs based on current mode |
4731 | * Calculate watermark values for the various WM regs based on current mode |
4763 | * and plane configuration. |
4732 | * and plane configuration. |
4764 | * |
4733 | * |
4765 | * There are several cases to deal with here: |
4734 | * There are several cases to deal with here: |
4766 | * - normal (i.e. non-self-refresh) |
4735 | * - normal (i.e. non-self-refresh) |
4767 | * - self-refresh (SR) mode |
4736 | * - self-refresh (SR) mode |
4768 | * - lines are large relative to FIFO size (buffer can hold up to 2) |
4737 | * - lines are large relative to FIFO size (buffer can hold up to 2) |
4769 | * - lines are small relative to FIFO size (buffer can hold more than 2 |
4738 | * - lines are small relative to FIFO size (buffer can hold more than 2 |
4770 | * lines), so need to account for TLB latency |
4739 | * lines), so need to account for TLB latency |
4771 | * |
4740 | * |
4772 | * The normal calculation is: |
4741 | * The normal calculation is: |
4773 | * watermark = dotclock * bytes per pixel * latency |
4742 | * watermark = dotclock * bytes per pixel * latency |
4774 | * where latency is platform & configuration dependent (we assume pessimal |
4743 | * where latency is platform & configuration dependent (we assume pessimal |
4775 | * values here). |
4744 | * values here). |
4776 | * |
4745 | * |
4777 | * The SR calculation is: |
4746 | * The SR calculation is: |
4778 | * watermark = (trunc(latency/line time)+1) * surface width * |
4747 | * watermark = (trunc(latency/line time)+1) * surface width * |
4779 | * bytes per pixel |
4748 | * bytes per pixel |
4780 | * where |
4749 | * where |
4781 | * line time = htotal / dotclock |
4750 | * line time = htotal / dotclock |
4782 | * surface width = hdisplay for normal plane and 64 for cursor |
4751 | * surface width = hdisplay for normal plane and 64 for cursor |
4783 | * and latency is assumed to be high, as above. |
4752 | * and latency is assumed to be high, as above. |
4784 | * |
4753 | * |
4785 | * The final value programmed to the register should always be rounded up, |
4754 | * The final value programmed to the register should always be rounded up, |
4786 | * and include an extra 2 entries to account for clock crossings. |
4755 | * and include an extra 2 entries to account for clock crossings. |
4787 | * |
4756 | * |
4788 | * We don't use the sprite, so we can ignore that. And on Crestline we have |
4757 | * We don't use the sprite, so we can ignore that. And on Crestline we have |
4789 | * to set the non-SR watermarks to 8. |
4758 | * to set the non-SR watermarks to 8. |
4790 | */ |
4759 | */ |
4791 | static void intel_update_watermarks(struct drm_device *dev) |
4760 | static void intel_update_watermarks(struct drm_device *dev) |
4792 | { |
4761 | { |
4793 | struct drm_i915_private *dev_priv = dev->dev_private; |
4762 | struct drm_i915_private *dev_priv = dev->dev_private; |
4794 | 4763 | ||
4795 | if (dev_priv->display.update_wm) |
4764 | if (dev_priv->display.update_wm) |
4796 | dev_priv->display.update_wm(dev); |
4765 | dev_priv->display.update_wm(dev); |
4797 | } |
4766 | } |
4798 | 4767 | ||
4799 | void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, |
4768 | void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, |
4800 | uint32_t sprite_width, int pixel_size) |
4769 | uint32_t sprite_width, int pixel_size) |
4801 | { |
4770 | { |
4802 | struct drm_i915_private *dev_priv = dev->dev_private; |
4771 | struct drm_i915_private *dev_priv = dev->dev_private; |
4803 | 4772 | ||
4804 | if (dev_priv->display.update_sprite_wm) |
4773 | if (dev_priv->display.update_sprite_wm) |
4805 | dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, |
4774 | dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, |
4806 | pixel_size); |
4775 | pixel_size); |
4807 | } |
4776 | } |
4808 | 4777 | ||
4809 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
4778 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
4810 | { |
4779 | { |
4811 | if (i915_panel_use_ssc >= 0) |
4780 | if (i915_panel_use_ssc >= 0) |
4812 | return i915_panel_use_ssc != 0; |
4781 | return i915_panel_use_ssc != 0; |
4813 | return dev_priv->lvds_use_ssc |
4782 | return dev_priv->lvds_use_ssc |
4814 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); |
4783 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); |
4815 | } |
4784 | } |
4816 | 4785 | ||
4817 | /** |
4786 | /** |
4818 | * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send |
4787 | * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send |
4819 | * @crtc: CRTC structure |
4788 | * @crtc: CRTC structure |
4820 | * @mode: requested mode |
4789 | * @mode: requested mode |
4821 | * |
4790 | * |
4822 | * A pipe may be connected to one or more outputs. Based on the depth of the |
4791 | * A pipe may be connected to one or more outputs. Based on the depth of the |
4823 | * attached framebuffer, choose a good color depth to use on the pipe. |
4792 | * attached framebuffer, choose a good color depth to use on the pipe. |
4824 | * |
4793 | * |
4825 | * If possible, match the pipe depth to the fb depth. In some cases, this |
4794 | * If possible, match the pipe depth to the fb depth. In some cases, this |
4826 | * isn't ideal, because the connected output supports a lesser or restricted |
4795 | * isn't ideal, because the connected output supports a lesser or restricted |
4827 | * set of depths. Resolve that here: |
4796 | * set of depths. Resolve that here: |
4828 | * LVDS typically supports only 6bpc, so clamp down in that case |
4797 | * LVDS typically supports only 6bpc, so clamp down in that case |
4829 | * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc |
4798 | * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc |
4830 | * Displays may support a restricted set as well, check EDID and clamp as |
4799 | * Displays may support a restricted set as well, check EDID and clamp as |
4831 | * appropriate. |
4800 | * appropriate. |
4832 | * DP may want to dither down to 6bpc to fit larger modes |
4801 | * DP may want to dither down to 6bpc to fit larger modes |
4833 | * |
4802 | * |
4834 | * RETURNS: |
4803 | * RETURNS: |
4835 | * Dithering requirement (i.e. false if display bpc and pipe bpc match, |
4804 | * Dithering requirement (i.e. false if display bpc and pipe bpc match, |
4836 | * true if they don't match). |
4805 | * true if they don't match). |
4837 | */ |
4806 | */ |
4838 | static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, |
4807 | static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, |
4839 | unsigned int *pipe_bpp, |
4808 | unsigned int *pipe_bpp, |
4840 | struct drm_display_mode *mode) |
4809 | struct drm_display_mode *mode) |
4841 | { |
4810 | { |
4842 | struct drm_device *dev = crtc->dev; |
4811 | struct drm_device *dev = crtc->dev; |
4843 | struct drm_i915_private *dev_priv = dev->dev_private; |
4812 | struct drm_i915_private *dev_priv = dev->dev_private; |
4844 | struct drm_encoder *encoder; |
4813 | struct drm_encoder *encoder; |
4845 | struct drm_connector *connector; |
4814 | struct drm_connector *connector; |
4846 | unsigned int display_bpc = UINT_MAX, bpc; |
4815 | unsigned int display_bpc = UINT_MAX, bpc; |
4847 | 4816 | ||
4848 | /* Walk the encoders & connectors on this crtc, get min bpc */ |
4817 | /* Walk the encoders & connectors on this crtc, get min bpc */ |
4849 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
4818 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
4850 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); |
4819 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); |
4851 | 4820 | ||
4852 | if (encoder->crtc != crtc) |
4821 | if (encoder->crtc != crtc) |
4853 | continue; |
4822 | continue; |
4854 | 4823 | ||
4855 | if (intel_encoder->type == INTEL_OUTPUT_LVDS) { |
4824 | if (intel_encoder->type == INTEL_OUTPUT_LVDS) { |
4856 | unsigned int lvds_bpc; |
4825 | unsigned int lvds_bpc; |
4857 | 4826 | ||
4858 | if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == |
4827 | if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == |
4859 | LVDS_A3_POWER_UP) |
4828 | LVDS_A3_POWER_UP) |
4860 | lvds_bpc = 8; |
4829 | lvds_bpc = 8; |
4861 | else |
4830 | else |
4862 | lvds_bpc = 6; |
4831 | lvds_bpc = 6; |
4863 | 4832 | ||
4864 | if (lvds_bpc < display_bpc) { |
4833 | if (lvds_bpc < display_bpc) { |
4865 | DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); |
4834 | DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); |
4866 | display_bpc = lvds_bpc; |
4835 | display_bpc = lvds_bpc; |
4867 | } |
4836 | } |
4868 | continue; |
4837 | continue; |
4869 | } |
4838 | } |
4870 | 4839 | ||
4871 | if (intel_encoder->type == INTEL_OUTPUT_EDP) { |
4840 | if (intel_encoder->type == INTEL_OUTPUT_EDP) { |
4872 | /* Use VBT settings if we have an eDP panel */ |
4841 | /* Use VBT settings if we have an eDP panel */ |
4873 | unsigned int edp_bpc = dev_priv->edp.bpp / 3; |
4842 | unsigned int edp_bpc = dev_priv->edp.bpp / 3; |
4874 | 4843 | ||
4875 | if (edp_bpc < display_bpc) { |
4844 | if (edp_bpc < display_bpc) { |
4876 | DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); |
4845 | DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); |
4877 | display_bpc = edp_bpc; |
4846 | display_bpc = edp_bpc; |
4878 | } |
4847 | } |
4879 | continue; |
4848 | continue; |
4880 | } |
4849 | } |
4881 | 4850 | ||
4882 | /* Not one of the known troublemakers, check the EDID */ |
4851 | /* Not one of the known troublemakers, check the EDID */ |
4883 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
4852 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
4884 | head) { |
4853 | head) { |
4885 | if (connector->encoder != encoder) |
4854 | if (connector->encoder != encoder) |
4886 | continue; |
4855 | continue; |
4887 | 4856 | ||
4888 | /* Don't use an invalid EDID bpc value */ |
4857 | /* Don't use an invalid EDID bpc value */ |
4889 | if (connector->display_info.bpc && |
4858 | if (connector->display_info.bpc && |
4890 | connector->display_info.bpc < display_bpc) { |
4859 | connector->display_info.bpc < display_bpc) { |
4891 | DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); |
4860 | DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); |
4892 | display_bpc = connector->display_info.bpc; |
4861 | display_bpc = connector->display_info.bpc; |
4893 | } |
4862 | } |
4894 | } |
4863 | } |
4895 | 4864 | ||
4896 | /* |
4865 | /* |
4897 | * HDMI is either 12 or 8, so if the display lets 10bpc sneak |
4866 | * HDMI is either 12 or 8, so if the display lets 10bpc sneak |
4898 | * through, clamp it down. (Note: >12bpc will be caught below.) |
4867 | * through, clamp it down. (Note: >12bpc will be caught below.) |
4899 | */ |
4868 | */ |
4900 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { |
4869 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { |
4901 | if (display_bpc > 8 && display_bpc < 12) { |
4870 | if (display_bpc > 8 && display_bpc < 12) { |
4902 | DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n"); |
4871 | DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n"); |
4903 | display_bpc = 12; |
4872 | display_bpc = 12; |
4904 | } else { |
4873 | } else { |
4905 | DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n"); |
4874 | DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n"); |
4906 | display_bpc = 8; |
4875 | display_bpc = 8; |
4907 | } |
4876 | } |
4908 | } |
4877 | } |
4909 | } |
4878 | } |
4910 | 4879 | ||
4911 | if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
4880 | if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
4912 | DRM_DEBUG_KMS("Dithering DP to 6bpc\n"); |
4881 | DRM_DEBUG_KMS("Dithering DP to 6bpc\n"); |
4913 | display_bpc = 6; |
4882 | display_bpc = 6; |
4914 | } |
4883 | } |
4915 | 4884 | ||
4916 | /* |
4885 | /* |
4917 | * We could just drive the pipe at the highest bpc all the time and |
4886 | * We could just drive the pipe at the highest bpc all the time and |
4918 | * enable dithering as needed, but that costs bandwidth. So choose |
4887 | * enable dithering as needed, but that costs bandwidth. So choose |
4919 | * the minimum value that expresses the full color range of the fb but |
4888 | * the minimum value that expresses the full color range of the fb but |
4920 | * also stays within the max display bpc discovered above. |
4889 | * also stays within the max display bpc discovered above. |
4921 | */ |
4890 | */ |
4922 | 4891 | ||
4923 | switch (crtc->fb->depth) { |
4892 | switch (crtc->fb->depth) { |
4924 | case 8: |
4893 | case 8: |
4925 | bpc = 8; /* since we go through a colormap */ |
4894 | bpc = 8; /* since we go through a colormap */ |
4926 | break; |
4895 | break; |
4927 | case 15: |
4896 | case 15: |
4928 | case 16: |
4897 | case 16: |
4929 | bpc = 6; /* min is 18bpp */ |
4898 | bpc = 6; /* min is 18bpp */ |
4930 | break; |
4899 | break; |
4931 | case 24: |
4900 | case 24: |
4932 | bpc = 8; |
4901 | bpc = 8; |
4933 | break; |
4902 | break; |
4934 | case 30: |
4903 | case 30: |
4935 | bpc = 10; |
4904 | bpc = 10; |
4936 | break; |
4905 | break; |
4937 | case 48: |
4906 | case 48: |
4938 | bpc = 12; |
4907 | bpc = 12; |
4939 | break; |
4908 | break; |
4940 | default: |
4909 | default: |
4941 | DRM_DEBUG("unsupported depth, assuming 24 bits\n"); |
4910 | DRM_DEBUG("unsupported depth, assuming 24 bits\n"); |
4942 | bpc = min((unsigned int)8, display_bpc); |
4911 | bpc = min((unsigned int)8, display_bpc); |
4943 | break; |
4912 | break; |
4944 | } |
4913 | } |
4945 | 4914 | ||
4946 | display_bpc = min(display_bpc, bpc); |
4915 | display_bpc = min(display_bpc, bpc); |
4947 | 4916 | ||
4948 | DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n", |
4917 | DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n", |
4949 | bpc, display_bpc); |
4918 | bpc, display_bpc); |
4950 | 4919 | ||
4951 | *pipe_bpp = display_bpc * 3; |
4920 | *pipe_bpp = display_bpc * 3; |
4952 | 4921 | ||
4953 | return display_bpc != bpc; |
4922 | return display_bpc != bpc; |
4954 | } |
4923 | } |
4955 | 4924 | ||
4956 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
4925 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
4957 | struct drm_display_mode *mode, |
4926 | struct drm_display_mode *mode, |
4958 | struct drm_display_mode *adjusted_mode, |
4927 | struct drm_display_mode *adjusted_mode, |
4959 | int x, int y, |
4928 | int x, int y, |
4960 | struct drm_framebuffer *old_fb) |
4929 | struct drm_framebuffer *old_fb) |
4961 | { |
4930 | { |
4962 | struct drm_device *dev = crtc->dev; |
4931 | struct drm_device *dev = crtc->dev; |
4963 | struct drm_i915_private *dev_priv = dev->dev_private; |
4932 | struct drm_i915_private *dev_priv = dev->dev_private; |
4964 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4933 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4965 | int pipe = intel_crtc->pipe; |
4934 | int pipe = intel_crtc->pipe; |
4966 | int plane = intel_crtc->plane; |
4935 | int plane = intel_crtc->plane; |
4967 | int refclk, num_connectors = 0; |
4936 | int refclk, num_connectors = 0; |
4968 | intel_clock_t clock, reduced_clock; |
4937 | intel_clock_t clock, reduced_clock; |
4969 | u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; |
4938 | u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; |
4970 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; |
4939 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; |
4971 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
4940 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
4972 | struct drm_mode_config *mode_config = &dev->mode_config; |
4941 | struct drm_mode_config *mode_config = &dev->mode_config; |
4973 | struct intel_encoder *encoder; |
4942 | struct intel_encoder *encoder; |
4974 | const intel_limit_t *limit; |
4943 | const intel_limit_t *limit; |
4975 | int ret; |
4944 | int ret; |
4976 | u32 temp; |
4945 | u32 temp; |
4977 | u32 lvds_sync = 0; |
4946 | u32 lvds_sync = 0; |
4978 | 4947 | ||
4979 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
4948 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
4980 | if (encoder->base.crtc != crtc) |
4949 | if (encoder->base.crtc != crtc) |
4981 | continue; |
4950 | continue; |
4982 | 4951 | ||
4983 | switch (encoder->type) { |
4952 | switch (encoder->type) { |
4984 | case INTEL_OUTPUT_LVDS: |
4953 | case INTEL_OUTPUT_LVDS: |
4985 | is_lvds = true; |
4954 | is_lvds = true; |
4986 | break; |
4955 | break; |
4987 | case INTEL_OUTPUT_SDVO: |
4956 | case INTEL_OUTPUT_SDVO: |
4988 | case INTEL_OUTPUT_HDMI: |
4957 | case INTEL_OUTPUT_HDMI: |
4989 | is_sdvo = true; |
4958 | is_sdvo = true; |
4990 | if (encoder->needs_tv_clock) |
4959 | if (encoder->needs_tv_clock) |
4991 | is_tv = true; |
4960 | is_tv = true; |
4992 | break; |
4961 | break; |
4993 | case INTEL_OUTPUT_DVO: |
4962 | case INTEL_OUTPUT_DVO: |
4994 | is_dvo = true; |
4963 | is_dvo = true; |
4995 | break; |
4964 | break; |
4996 | case INTEL_OUTPUT_TVOUT: |
4965 | case INTEL_OUTPUT_TVOUT: |
4997 | is_tv = true; |
4966 | is_tv = true; |
4998 | break; |
4967 | break; |
4999 | case INTEL_OUTPUT_ANALOG: |
4968 | case INTEL_OUTPUT_ANALOG: |
5000 | is_crt = true; |
4969 | is_crt = true; |
5001 | break; |
4970 | break; |
5002 | case INTEL_OUTPUT_DISPLAYPORT: |
4971 | case INTEL_OUTPUT_DISPLAYPORT: |
5003 | is_dp = true; |
4972 | is_dp = true; |
5004 | break; |
4973 | break; |
5005 | } |
4974 | } |
5006 | 4975 | ||
5007 | num_connectors++; |
4976 | num_connectors++; |
5008 | } |
4977 | } |
5009 | 4978 | ||
5010 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
4979 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
5011 | refclk = dev_priv->lvds_ssc_freq * 1000; |
4980 | refclk = dev_priv->lvds_ssc_freq * 1000; |
5012 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
4981 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
5013 | refclk / 1000); |
4982 | refclk / 1000); |
5014 | } else if (!IS_GEN2(dev)) { |
4983 | } else if (!IS_GEN2(dev)) { |
5015 | refclk = 96000; |
4984 | refclk = 96000; |
5016 | } else { |
4985 | } else { |
5017 | refclk = 48000; |
4986 | refclk = 48000; |
5018 | } |
4987 | } |
5019 | 4988 | ||
5020 | /* |
4989 | /* |
5021 | * Returns a set of divisors for the desired target clock with the given |
4990 | * Returns a set of divisors for the desired target clock with the given |
5022 | * refclk, or FALSE. The returned values represent the clock equation: |
4991 | * refclk, or FALSE. The returned values represent the clock equation: |
5023 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
4992 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
5024 | */ |
4993 | */ |
5025 | limit = intel_limit(crtc, refclk); |
4994 | limit = intel_limit(crtc, refclk); |
5026 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); |
4995 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); |
5027 | if (!ok) { |
4996 | if (!ok) { |
5028 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
4997 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
5029 | return -EINVAL; |
4998 | return -EINVAL; |
5030 | } |
4999 | } |
5031 | 5000 | ||
5032 | /* Ensure that the cursor is valid for the new mode before changing... */ |
5001 | /* Ensure that the cursor is valid for the new mode before changing... */ |
5033 | // intel_crtc_update_cursor(crtc, true); |
5002 | // intel_crtc_update_cursor(crtc, true); |
5034 | 5003 | ||
5035 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
5004 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
5036 | has_reduced_clock = limit->find_pll(limit, crtc, |
5005 | has_reduced_clock = limit->find_pll(limit, crtc, |
5037 | dev_priv->lvds_downclock, |
5006 | dev_priv->lvds_downclock, |
5038 | refclk, |
5007 | refclk, |
5039 | &reduced_clock); |
5008 | &reduced_clock); |
5040 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { |
5009 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { |
5041 | /* |
5010 | /* |
5042 | * If the different P is found, it means that we can't |
5011 | * If the different P is found, it means that we can't |
5043 | * switch the display clock by using the FP0/FP1. |
5012 | * switch the display clock by using the FP0/FP1. |
5044 | * In such case we will disable the LVDS downclock |
5013 | * In such case we will disable the LVDS downclock |
5045 | * feature. |
5014 | * feature. |
5046 | */ |
5015 | */ |
5047 | DRM_DEBUG_KMS("Different P is found for " |
5016 | DRM_DEBUG_KMS("Different P is found for " |
5048 | "LVDS clock/downclock\n"); |
5017 | "LVDS clock/downclock\n"); |
5049 | has_reduced_clock = 0; |
5018 | has_reduced_clock = 0; |
5050 | } |
5019 | } |
5051 | } |
5020 | } |
5052 | /* SDVO TV has fixed PLL values depend on its clock range, |
5021 | /* SDVO TV has fixed PLL values depend on its clock range, |
5053 | this mirrors vbios setting. */ |
5022 | this mirrors vbios setting. */ |
5054 | if (is_sdvo && is_tv) { |
5023 | if (is_sdvo && is_tv) { |
5055 | if (adjusted_mode->clock >= 100000 |
5024 | if (adjusted_mode->clock >= 100000 |
5056 | && adjusted_mode->clock < 140500) { |
5025 | && adjusted_mode->clock < 140500) { |
5057 | clock.p1 = 2; |
5026 | clock.p1 = 2; |
5058 | clock.p2 = 10; |
5027 | clock.p2 = 10; |
5059 | clock.n = 3; |
5028 | clock.n = 3; |
5060 | clock.m1 = 16; |
5029 | clock.m1 = 16; |
5061 | clock.m2 = 8; |
5030 | clock.m2 = 8; |
5062 | } else if (adjusted_mode->clock >= 140500 |
5031 | } else if (adjusted_mode->clock >= 140500 |
5063 | && adjusted_mode->clock <= 200000) { |
5032 | && adjusted_mode->clock <= 200000) { |
5064 | clock.p1 = 1; |
5033 | clock.p1 = 1; |
5065 | clock.p2 = 10; |
5034 | clock.p2 = 10; |
5066 | clock.n = 6; |
5035 | clock.n = 6; |
5067 | clock.m1 = 12; |
5036 | clock.m1 = 12; |
5068 | clock.m2 = 8; |
5037 | clock.m2 = 8; |
5069 | } |
5038 | } |
5070 | } |
5039 | } |
5071 | 5040 | ||
5072 | if (IS_PINEVIEW(dev)) { |
5041 | if (IS_PINEVIEW(dev)) { |
5073 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; |
5042 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; |
5074 | if (has_reduced_clock) |
5043 | if (has_reduced_clock) |
5075 | fp2 = (1 << reduced_clock.n) << 16 | |
5044 | fp2 = (1 << reduced_clock.n) << 16 | |
5076 | reduced_clock.m1 << 8 | reduced_clock.m2; |
5045 | reduced_clock.m1 << 8 | reduced_clock.m2; |
5077 | } else { |
5046 | } else { |
5078 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
5047 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
5079 | if (has_reduced_clock) |
5048 | if (has_reduced_clock) |
5080 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
5049 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
5081 | reduced_clock.m2; |
5050 | reduced_clock.m2; |
5082 | } |
5051 | } |
5083 | 5052 | ||
5084 | dpll = DPLL_VGA_MODE_DIS; |
5053 | dpll = DPLL_VGA_MODE_DIS; |
5085 | 5054 | ||
5086 | if (!IS_GEN2(dev)) { |
5055 | if (!IS_GEN2(dev)) { |
5087 | if (is_lvds) |
5056 | if (is_lvds) |
5088 | dpll |= DPLLB_MODE_LVDS; |
5057 | dpll |= DPLLB_MODE_LVDS; |
5089 | else |
5058 | else |
5090 | dpll |= DPLLB_MODE_DAC_SERIAL; |
5059 | dpll |= DPLLB_MODE_DAC_SERIAL; |
5091 | if (is_sdvo) { |
5060 | if (is_sdvo) { |
5092 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
5061 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
5093 | if (pixel_multiplier > 1) { |
5062 | if (pixel_multiplier > 1) { |
5094 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
5063 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
5095 | dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; |
5064 | dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; |
5096 | } |
5065 | } |
5097 | dpll |= DPLL_DVO_HIGH_SPEED; |
5066 | dpll |= DPLL_DVO_HIGH_SPEED; |
5098 | } |
5067 | } |
5099 | if (is_dp) |
5068 | if (is_dp) |
5100 | dpll |= DPLL_DVO_HIGH_SPEED; |
5069 | dpll |= DPLL_DVO_HIGH_SPEED; |
5101 | 5070 | ||
5102 | /* compute bitmask from p1 value */ |
5071 | /* compute bitmask from p1 value */ |
5103 | if (IS_PINEVIEW(dev)) |
5072 | if (IS_PINEVIEW(dev)) |
5104 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; |
5073 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; |
5105 | else { |
5074 | else { |
5106 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
5075 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
5107 | if (IS_G4X(dev) && has_reduced_clock) |
5076 | if (IS_G4X(dev) && has_reduced_clock) |
5108 | dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
5077 | dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
5109 | } |
5078 | } |
5110 | switch (clock.p2) { |
5079 | switch (clock.p2) { |
5111 | case 5: |
5080 | case 5: |
5112 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; |
5081 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; |
5113 | break; |
5082 | break; |
5114 | case 7: |
5083 | case 7: |
5115 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; |
5084 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; |
5116 | break; |
5085 | break; |
5117 | case 10: |
5086 | case 10: |
5118 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; |
5087 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; |
5119 | break; |
5088 | break; |
5120 | case 14: |
5089 | case 14: |
5121 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; |
5090 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; |
5122 | break; |
5091 | break; |
5123 | } |
5092 | } |
5124 | if (INTEL_INFO(dev)->gen >= 4) |
5093 | if (INTEL_INFO(dev)->gen >= 4) |
5125 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); |
5094 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); |
5126 | } else { |
5095 | } else { |
5127 | if (is_lvds) { |
5096 | if (is_lvds) { |
5128 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
5097 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
5129 | } else { |
5098 | } else { |
5130 | if (clock.p1 == 2) |
5099 | if (clock.p1 == 2) |
5131 | dpll |= PLL_P1_DIVIDE_BY_TWO; |
5100 | dpll |= PLL_P1_DIVIDE_BY_TWO; |
5132 | else |
5101 | else |
5133 | dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
5102 | dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
5134 | if (clock.p2 == 4) |
5103 | if (clock.p2 == 4) |
5135 | dpll |= PLL_P2_DIVIDE_BY_4; |
5104 | dpll |= PLL_P2_DIVIDE_BY_4; |
5136 | } |
5105 | } |
5137 | } |
5106 | } |
5138 | 5107 | ||
5139 | if (is_sdvo && is_tv) |
5108 | if (is_sdvo && is_tv) |
5140 | dpll |= PLL_REF_INPUT_TVCLKINBC; |
5109 | dpll |= PLL_REF_INPUT_TVCLKINBC; |
5141 | else if (is_tv) |
5110 | else if (is_tv) |
5142 | /* XXX: just matching BIOS for now */ |
5111 | /* XXX: just matching BIOS for now */ |
5143 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
5112 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
5144 | dpll |= 3; |
5113 | dpll |= 3; |
5145 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
5114 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
5146 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
5115 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
5147 | else |
5116 | else |
5148 | dpll |= PLL_REF_INPUT_DREFCLK; |
5117 | dpll |= PLL_REF_INPUT_DREFCLK; |
5149 | 5118 | ||
5150 | /* setup pipeconf */ |
5119 | /* setup pipeconf */ |
5151 | pipeconf = I915_READ(PIPECONF(pipe)); |
5120 | pipeconf = I915_READ(PIPECONF(pipe)); |
5152 | 5121 | ||
5153 | /* Set up the display plane register */ |
5122 | /* Set up the display plane register */ |
5154 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
5123 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
5155 | 5124 | ||
5156 | /* Ironlake's plane is forced to pipe, bit 24 is to |
5125 | /* Ironlake's plane is forced to pipe, bit 24 is to |
5157 | enable color space conversion */ |
5126 | enable color space conversion */ |
5158 | if (pipe == 0) |
5127 | if (pipe == 0) |
5159 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; |
5128 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; |
5160 | else |
5129 | else |
5161 | dspcntr |= DISPPLANE_SEL_PIPE_B; |
5130 | dspcntr |= DISPPLANE_SEL_PIPE_B; |
5162 | 5131 | ||
5163 | if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { |
5132 | if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { |
5164 | /* Enable pixel doubling when the dot clock is > 90% of the (display) |
5133 | /* Enable pixel doubling when the dot clock is > 90% of the (display) |
5165 | * core speed. |
5134 | * core speed. |
5166 | * |
5135 | * |
5167 | * XXX: No double-wide on 915GM pipe B. Is that the only reason for the |
5136 | * XXX: No double-wide on 915GM pipe B. Is that the only reason for the |
5168 | * pipe == 0 check? |
5137 | * pipe == 0 check? |
5169 | */ |
5138 | */ |
5170 | if (mode->clock > |
5139 | if (mode->clock > |
5171 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) |
5140 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) |
5172 | pipeconf |= PIPECONF_DOUBLE_WIDE; |
5141 | pipeconf |= PIPECONF_DOUBLE_WIDE; |
5173 | else |
5142 | else |
5174 | pipeconf &= ~PIPECONF_DOUBLE_WIDE; |
5143 | pipeconf &= ~PIPECONF_DOUBLE_WIDE; |
5175 | } |
5144 | } |
5176 | 5145 | ||
5177 | /* default to 8bpc */ |
5146 | /* default to 8bpc */ |
5178 | pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); |
5147 | pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); |
5179 | if (is_dp) { |
5148 | if (is_dp) { |
5180 | if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
5149 | if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
5181 | pipeconf |= PIPECONF_BPP_6 | |
5150 | pipeconf |= PIPECONF_BPP_6 | |
5182 | PIPECONF_DITHER_EN | |
5151 | PIPECONF_DITHER_EN | |
5183 | PIPECONF_DITHER_TYPE_SP; |
5152 | PIPECONF_DITHER_TYPE_SP; |
5184 | } |
5153 | } |
5185 | } |
5154 | } |
5186 | 5155 | ||
5187 | dpll |= DPLL_VCO_ENABLE; |
5156 | dpll |= DPLL_VCO_ENABLE; |
5188 | 5157 | ||
5189 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
5158 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
5190 | drm_mode_debug_printmodeline(mode); |
5159 | drm_mode_debug_printmodeline(mode); |
5191 | 5160 | ||
5192 | I915_WRITE(FP0(pipe), fp); |
5161 | I915_WRITE(FP0(pipe), fp); |
5193 | I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); |
5162 | I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); |
5194 | 5163 | ||
5195 | POSTING_READ(DPLL(pipe)); |
5164 | POSTING_READ(DPLL(pipe)); |
5196 | udelay(150); |
5165 | udelay(150); |
5197 | 5166 | ||
5198 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
5167 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
5199 | * This is an exception to the general rule that mode_set doesn't turn |
5168 | * This is an exception to the general rule that mode_set doesn't turn |
5200 | * things on. |
5169 | * things on. |
5201 | */ |
5170 | */ |
5202 | if (is_lvds) { |
5171 | if (is_lvds) { |
5203 | temp = I915_READ(LVDS); |
5172 | temp = I915_READ(LVDS); |
5204 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
5173 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
5205 | if (pipe == 1) { |
5174 | if (pipe == 1) { |
5206 | temp |= LVDS_PIPEB_SELECT; |
5175 | temp |= LVDS_PIPEB_SELECT; |
5207 | } else { |
5176 | } else { |
5208 | temp &= ~LVDS_PIPEB_SELECT; |
5177 | temp &= ~LVDS_PIPEB_SELECT; |
5209 | } |
5178 | } |
5210 | /* set the corresponsding LVDS_BORDER bit */ |
5179 | /* set the corresponsding LVDS_BORDER bit */ |
5211 | temp |= dev_priv->lvds_border_bits; |
5180 | temp |= dev_priv->lvds_border_bits; |
5212 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
5181 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
5213 | * set the DPLLs for dual-channel mode or not. |
5182 | * set the DPLLs for dual-channel mode or not. |
5214 | */ |
5183 | */ |
5215 | if (clock.p2 == 7) |
5184 | if (clock.p2 == 7) |
5216 | temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
5185 | temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
5217 | else |
5186 | else |
5218 | temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); |
5187 | temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); |
5219 | 5188 | ||
5220 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) |
5189 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) |
5221 | * appropriately here, but we need to look more thoroughly into how |
5190 | * appropriately here, but we need to look more thoroughly into how |
5222 | * panels behave in the two modes. |
5191 | * panels behave in the two modes. |
5223 | */ |
5192 | */ |
5224 | /* set the dithering flag on LVDS as needed */ |
5193 | /* set the dithering flag on LVDS as needed */ |
5225 | if (INTEL_INFO(dev)->gen >= 4) { |
5194 | if (INTEL_INFO(dev)->gen >= 4) { |
5226 | if (dev_priv->lvds_dither) |
5195 | if (dev_priv->lvds_dither) |
5227 | temp |= LVDS_ENABLE_DITHER; |
5196 | temp |= LVDS_ENABLE_DITHER; |
5228 | else |
5197 | else |
5229 | temp &= ~LVDS_ENABLE_DITHER; |
5198 | temp &= ~LVDS_ENABLE_DITHER; |
5230 | } |
5199 | } |
5231 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
5200 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
5232 | lvds_sync |= LVDS_HSYNC_POLARITY; |
5201 | lvds_sync |= LVDS_HSYNC_POLARITY; |
5233 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) |
5202 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) |
5234 | lvds_sync |= LVDS_VSYNC_POLARITY; |
5203 | lvds_sync |= LVDS_VSYNC_POLARITY; |
5235 | if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) |
5204 | if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) |
5236 | != lvds_sync) { |
5205 | != lvds_sync) { |
5237 | char flags[2] = "-+"; |
5206 | char flags[2] = "-+"; |
5238 | DRM_INFO("Changing LVDS panel from " |
5207 | DRM_INFO("Changing LVDS panel from " |
5239 | "(%chsync, %cvsync) to (%chsync, %cvsync)\n", |
5208 | "(%chsync, %cvsync) to (%chsync, %cvsync)\n", |
5240 | flags[!(temp & LVDS_HSYNC_POLARITY)], |
5209 | flags[!(temp & LVDS_HSYNC_POLARITY)], |
5241 | flags[!(temp & LVDS_VSYNC_POLARITY)], |
5210 | flags[!(temp & LVDS_VSYNC_POLARITY)], |
5242 | flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], |
5211 | flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], |
5243 | flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); |
5212 | flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); |
5244 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); |
5213 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); |
5245 | temp |= lvds_sync; |
5214 | temp |= lvds_sync; |
5246 | } |
5215 | } |
5247 | I915_WRITE(LVDS, temp); |
5216 | I915_WRITE(LVDS, temp); |
5248 | } |
5217 | } |
5249 | 5218 | ||
5250 | if (is_dp) { |
5219 | if (is_dp) { |
5251 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
5220 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
5252 | } |
5221 | } |
5253 | 5222 | ||
5254 | I915_WRITE(DPLL(pipe), dpll); |
5223 | I915_WRITE(DPLL(pipe), dpll); |
5255 | 5224 | ||
5256 | /* Wait for the clocks to stabilize. */ |
5225 | /* Wait for the clocks to stabilize. */ |
5257 | POSTING_READ(DPLL(pipe)); |
5226 | POSTING_READ(DPLL(pipe)); |
5258 | udelay(150); |
5227 | udelay(150); |
5259 | 5228 | ||
5260 | if (INTEL_INFO(dev)->gen >= 4) { |
5229 | if (INTEL_INFO(dev)->gen >= 4) { |
5261 | temp = 0; |
5230 | temp = 0; |
5262 | if (is_sdvo) { |
5231 | if (is_sdvo) { |
5263 | temp = intel_mode_get_pixel_multiplier(adjusted_mode); |
5232 | temp = intel_mode_get_pixel_multiplier(adjusted_mode); |
5264 | if (temp > 1) |
5233 | if (temp > 1) |
5265 | temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; |
5234 | temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; |
5266 | else |
5235 | else |
5267 | temp = 0; |
5236 | temp = 0; |
5268 | } |
5237 | } |
5269 | I915_WRITE(DPLL_MD(pipe), temp); |
5238 | I915_WRITE(DPLL_MD(pipe), temp); |
5270 | } else { |
5239 | } else { |
5271 | /* The pixel multiplier can only be updated once the |
5240 | /* The pixel multiplier can only be updated once the |
5272 | * DPLL is enabled and the clocks are stable. |
5241 | * DPLL is enabled and the clocks are stable. |
5273 | * |
5242 | * |
5274 | * So write it again. |
5243 | * So write it again. |
5275 | */ |
5244 | */ |
5276 | I915_WRITE(DPLL(pipe), dpll); |
5245 | I915_WRITE(DPLL(pipe), dpll); |
5277 | } |
5246 | } |
5278 | 5247 | ||
5279 | intel_crtc->lowfreq_avail = false; |
5248 | intel_crtc->lowfreq_avail = false; |
5280 | if (is_lvds && has_reduced_clock && i915_powersave) { |
5249 | if (is_lvds && has_reduced_clock && i915_powersave) { |
5281 | I915_WRITE(FP1(pipe), fp2); |
5250 | I915_WRITE(FP1(pipe), fp2); |
5282 | intel_crtc->lowfreq_avail = true; |
5251 | intel_crtc->lowfreq_avail = true; |
5283 | if (HAS_PIPE_CXSR(dev)) { |
5252 | if (HAS_PIPE_CXSR(dev)) { |
5284 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
5253 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
5285 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
5254 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
5286 | } |
5255 | } |
5287 | } else { |
5256 | } else { |
5288 | I915_WRITE(FP1(pipe), fp); |
5257 | I915_WRITE(FP1(pipe), fp); |
5289 | if (HAS_PIPE_CXSR(dev)) { |
5258 | if (HAS_PIPE_CXSR(dev)) { |
5290 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
5259 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
5291 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; |
5260 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; |
5292 | } |
5261 | } |
5293 | } |
5262 | } |
- | 5263 | ||
5294 | 5264 | pipeconf &= ~PIPECONF_INTERLACE_MASK; |
|
5295 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
5265 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
5296 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; |
5266 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; |
5297 | /* the chip adds 2 halflines automatically */ |
5267 | /* the chip adds 2 halflines automatically */ |
5298 | adjusted_mode->crtc_vdisplay -= 1; |
5268 | adjusted_mode->crtc_vdisplay -= 1; |
5299 | adjusted_mode->crtc_vtotal -= 1; |
5269 | adjusted_mode->crtc_vtotal -= 1; |
5300 | adjusted_mode->crtc_vblank_start -= 1; |
5270 | adjusted_mode->crtc_vblank_start -= 1; |
5301 | adjusted_mode->crtc_vblank_end -= 1; |
5271 | adjusted_mode->crtc_vblank_end -= 1; |
5302 | adjusted_mode->crtc_vsync_end -= 1; |
5272 | adjusted_mode->crtc_vsync_end -= 1; |
5303 | adjusted_mode->crtc_vsync_start -= 1; |
5273 | adjusted_mode->crtc_vsync_start -= 1; |
5304 | } else |
5274 | } else |
5305 | pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */ |
5275 | pipeconf |= PIPECONF_PROGRESSIVE; |
5306 | 5276 | ||
5307 | I915_WRITE(HTOTAL(pipe), |
5277 | I915_WRITE(HTOTAL(pipe), |
5308 | (adjusted_mode->crtc_hdisplay - 1) | |
5278 | (adjusted_mode->crtc_hdisplay - 1) | |
5309 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
5279 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
5310 | I915_WRITE(HBLANK(pipe), |
5280 | I915_WRITE(HBLANK(pipe), |
5311 | (adjusted_mode->crtc_hblank_start - 1) | |
5281 | (adjusted_mode->crtc_hblank_start - 1) | |
5312 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); |
5282 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); |
5313 | I915_WRITE(HSYNC(pipe), |
5283 | I915_WRITE(HSYNC(pipe), |
5314 | (adjusted_mode->crtc_hsync_start - 1) | |
5284 | (adjusted_mode->crtc_hsync_start - 1) | |
5315 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); |
5285 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); |
5316 | 5286 | ||
5317 | I915_WRITE(VTOTAL(pipe), |
5287 | I915_WRITE(VTOTAL(pipe), |
5318 | (adjusted_mode->crtc_vdisplay - 1) | |
5288 | (adjusted_mode->crtc_vdisplay - 1) | |
5319 | ((adjusted_mode->crtc_vtotal - 1) << 16)); |
5289 | ((adjusted_mode->crtc_vtotal - 1) << 16)); |
5320 | I915_WRITE(VBLANK(pipe), |
5290 | I915_WRITE(VBLANK(pipe), |
5321 | (adjusted_mode->crtc_vblank_start - 1) | |
5291 | (adjusted_mode->crtc_vblank_start - 1) | |
5322 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); |
5292 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); |
5323 | I915_WRITE(VSYNC(pipe), |
5293 | I915_WRITE(VSYNC(pipe), |
5324 | (adjusted_mode->crtc_vsync_start - 1) | |
5294 | (adjusted_mode->crtc_vsync_start - 1) | |
5325 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); |
5295 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); |
5326 | 5296 | ||
5327 | /* pipesrc and dspsize control the size that is scaled from, |
5297 | /* pipesrc and dspsize control the size that is scaled from, |
5328 | * which should always be the user's requested size. |
5298 | * which should always be the user's requested size. |
5329 | */ |
5299 | */ |
5330 | I915_WRITE(DSPSIZE(plane), |
5300 | I915_WRITE(DSPSIZE(plane), |
5331 | ((mode->vdisplay - 1) << 16) | |
5301 | ((mode->vdisplay - 1) << 16) | |
5332 | (mode->hdisplay - 1)); |
5302 | (mode->hdisplay - 1)); |
5333 | I915_WRITE(DSPPOS(plane), 0); |
5303 | I915_WRITE(DSPPOS(plane), 0); |
5334 | I915_WRITE(PIPESRC(pipe), |
5304 | I915_WRITE(PIPESRC(pipe), |
5335 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); |
5305 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); |
5336 | 5306 | ||
5337 | I915_WRITE(PIPECONF(pipe), pipeconf); |
5307 | I915_WRITE(PIPECONF(pipe), pipeconf); |
5338 | POSTING_READ(PIPECONF(pipe)); |
5308 | POSTING_READ(PIPECONF(pipe)); |
5339 | intel_enable_pipe(dev_priv, pipe, false); |
5309 | intel_enable_pipe(dev_priv, pipe, false); |
5340 | 5310 | ||
5341 | intel_wait_for_vblank(dev, pipe); |
5311 | intel_wait_for_vblank(dev, pipe); |
5342 | 5312 | ||
5343 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5313 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5344 | POSTING_READ(DSPCNTR(plane)); |
5314 | POSTING_READ(DSPCNTR(plane)); |
5345 | intel_enable_plane(dev_priv, plane, pipe); |
5315 | intel_enable_plane(dev_priv, plane, pipe); |
5346 | 5316 | ||
5347 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
5317 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
5348 | 5318 | ||
5349 | intel_update_watermarks(dev); |
5319 | intel_update_watermarks(dev); |
5350 | 5320 | ||
5351 | return ret; |
5321 | return ret; |
5352 | } |
5322 | } |
5353 | 5323 | ||
5354 | /* |
5324 | /* |
5355 | * Initialize reference clocks when the driver loads |
5325 | * Initialize reference clocks when the driver loads |
5356 | */ |
5326 | */ |
5357 | void ironlake_init_pch_refclk(struct drm_device *dev) |
5327 | void ironlake_init_pch_refclk(struct drm_device *dev) |
5358 | { |
5328 | { |
5359 | struct drm_i915_private *dev_priv = dev->dev_private; |
5329 | struct drm_i915_private *dev_priv = dev->dev_private; |
5360 | struct drm_mode_config *mode_config = &dev->mode_config; |
5330 | struct drm_mode_config *mode_config = &dev->mode_config; |
5361 | struct intel_encoder *encoder; |
5331 | struct intel_encoder *encoder; |
5362 | u32 temp; |
5332 | u32 temp; |
5363 | bool has_lvds = false; |
5333 | bool has_lvds = false; |
5364 | bool has_cpu_edp = false; |
5334 | bool has_cpu_edp = false; |
5365 | bool has_pch_edp = false; |
5335 | bool has_pch_edp = false; |
5366 | bool has_panel = false; |
5336 | bool has_panel = false; |
5367 | bool has_ck505 = false; |
5337 | bool has_ck505 = false; |
5368 | bool can_ssc = false; |
5338 | bool can_ssc = false; |
5369 | 5339 | ||
5370 | /* We need to take the global config into account */ |
5340 | /* We need to take the global config into account */ |
5371 | list_for_each_entry(encoder, &mode_config->encoder_list, |
5341 | list_for_each_entry(encoder, &mode_config->encoder_list, |
5372 | base.head) { |
5342 | base.head) { |
5373 | switch (encoder->type) { |
5343 | switch (encoder->type) { |
5374 | case INTEL_OUTPUT_LVDS: |
5344 | case INTEL_OUTPUT_LVDS: |
5375 | has_panel = true; |
5345 | has_panel = true; |
5376 | has_lvds = true; |
5346 | has_lvds = true; |
5377 | break; |
5347 | break; |
5378 | case INTEL_OUTPUT_EDP: |
5348 | case INTEL_OUTPUT_EDP: |
5379 | has_panel = true; |
5349 | has_panel = true; |
5380 | if (intel_encoder_is_pch_edp(&encoder->base)) |
5350 | if (intel_encoder_is_pch_edp(&encoder->base)) |
5381 | has_pch_edp = true; |
5351 | has_pch_edp = true; |
5382 | else |
5352 | else |
5383 | has_cpu_edp = true; |
5353 | has_cpu_edp = true; |
5384 | break; |
5354 | break; |
5385 | } |
5355 | } |
5386 | } |
5356 | } |
5387 | 5357 | ||
5388 | if (HAS_PCH_IBX(dev)) { |
5358 | if (HAS_PCH_IBX(dev)) { |
5389 | has_ck505 = dev_priv->display_clock_mode; |
5359 | has_ck505 = dev_priv->display_clock_mode; |
5390 | can_ssc = has_ck505; |
5360 | can_ssc = has_ck505; |
5391 | } else { |
5361 | } else { |
5392 | has_ck505 = false; |
5362 | has_ck505 = false; |
5393 | can_ssc = true; |
5363 | can_ssc = true; |
5394 | } |
5364 | } |
5395 | 5365 | ||
5396 | DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n", |
5366 | DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n", |
5397 | has_panel, has_lvds, has_pch_edp, has_cpu_edp, |
5367 | has_panel, has_lvds, has_pch_edp, has_cpu_edp, |
5398 | has_ck505); |
5368 | has_ck505); |
5399 | 5369 | ||
5400 | /* Ironlake: try to setup display ref clock before DPLL |
5370 | /* Ironlake: try to setup display ref clock before DPLL |
5401 | * enabling. This is only under driver's control after |
5371 | * enabling. This is only under driver's control after |
5402 | * PCH B stepping, previous chipset stepping should be |
5372 | * PCH B stepping, previous chipset stepping should be |
5403 | * ignoring this setting. |
5373 | * ignoring this setting. |
5404 | */ |
5374 | */ |
5405 | temp = I915_READ(PCH_DREF_CONTROL); |
5375 | temp = I915_READ(PCH_DREF_CONTROL); |
5406 | /* Always enable nonspread source */ |
5376 | /* Always enable nonspread source */ |
5407 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; |
5377 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; |
5408 | 5378 | ||
5409 | if (has_ck505) |
5379 | if (has_ck505) |
5410 | temp |= DREF_NONSPREAD_CK505_ENABLE; |
5380 | temp |= DREF_NONSPREAD_CK505_ENABLE; |
5411 | else |
5381 | else |
5412 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; |
5382 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; |
5413 | 5383 | ||
5414 | if (has_panel) { |
5384 | if (has_panel) { |
5415 | temp &= ~DREF_SSC_SOURCE_MASK; |
5385 | temp &= ~DREF_SSC_SOURCE_MASK; |
5416 | temp |= DREF_SSC_SOURCE_ENABLE; |
5386 | temp |= DREF_SSC_SOURCE_ENABLE; |
5417 | 5387 | ||
5418 | /* SSC must be turned on before enabling the CPU output */ |
5388 | /* SSC must be turned on before enabling the CPU output */ |
5419 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { |
5389 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { |
5420 | DRM_DEBUG_KMS("Using SSC on panel\n"); |
5390 | DRM_DEBUG_KMS("Using SSC on panel\n"); |
5421 | temp |= DREF_SSC1_ENABLE; |
5391 | temp |= DREF_SSC1_ENABLE; |
5422 | } |
5392 | } |
5423 | 5393 | ||
5424 | /* Get SSC going before enabling the outputs */ |
5394 | /* Get SSC going before enabling the outputs */ |
5425 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5395 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5426 | POSTING_READ(PCH_DREF_CONTROL); |
5396 | POSTING_READ(PCH_DREF_CONTROL); |
5427 | udelay(200); |
5397 | udelay(200); |
5428 | 5398 | ||
5429 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; |
5399 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; |
5430 | 5400 | ||
5431 | /* Enable CPU source on CPU attached eDP */ |
5401 | /* Enable CPU source on CPU attached eDP */ |
5432 | if (has_cpu_edp) { |
5402 | if (has_cpu_edp) { |
5433 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { |
5403 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { |
5434 | DRM_DEBUG_KMS("Using SSC on eDP\n"); |
5404 | DRM_DEBUG_KMS("Using SSC on eDP\n"); |
5435 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; |
5405 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; |
5436 | } |
5406 | } |
5437 | else |
5407 | else |
5438 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; |
5408 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; |
5439 | } else |
5409 | } else |
5440 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; |
5410 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; |
5441 | 5411 | ||
5442 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5412 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5443 | POSTING_READ(PCH_DREF_CONTROL); |
5413 | POSTING_READ(PCH_DREF_CONTROL); |
5444 | udelay(200); |
5414 | udelay(200); |
5445 | } else { |
5415 | } else { |
5446 | DRM_DEBUG_KMS("Disabling SSC entirely\n"); |
5416 | DRM_DEBUG_KMS("Disabling SSC entirely\n"); |
5447 | 5417 | ||
5448 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; |
5418 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; |
5449 | 5419 | ||
5450 | /* Turn off CPU output */ |
5420 | /* Turn off CPU output */ |
5451 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; |
5421 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; |
5452 | 5422 | ||
5453 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5423 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5454 | POSTING_READ(PCH_DREF_CONTROL); |
5424 | POSTING_READ(PCH_DREF_CONTROL); |
5455 | udelay(200); |
5425 | udelay(200); |
5456 | 5426 | ||
5457 | /* Turn off the SSC source */ |
5427 | /* Turn off the SSC source */ |
5458 | temp &= ~DREF_SSC_SOURCE_MASK; |
5428 | temp &= ~DREF_SSC_SOURCE_MASK; |
5459 | temp |= DREF_SSC_SOURCE_DISABLE; |
5429 | temp |= DREF_SSC_SOURCE_DISABLE; |
5460 | 5430 | ||
5461 | /* Turn off SSC1 */ |
5431 | /* Turn off SSC1 */ |
5462 | temp &= ~ DREF_SSC1_ENABLE; |
5432 | temp &= ~ DREF_SSC1_ENABLE; |
5463 | 5433 | ||
5464 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5434 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5465 | POSTING_READ(PCH_DREF_CONTROL); |
5435 | POSTING_READ(PCH_DREF_CONTROL); |
5466 | udelay(200); |
5436 | udelay(200); |
5467 | } |
5437 | } |
5468 | } |
5438 | } |
5469 | 5439 | ||
5470 | static int ironlake_get_refclk(struct drm_crtc *crtc) |
5440 | static int ironlake_get_refclk(struct drm_crtc *crtc) |
5471 | { |
5441 | { |
5472 | struct drm_device *dev = crtc->dev; |
5442 | struct drm_device *dev = crtc->dev; |
5473 | struct drm_i915_private *dev_priv = dev->dev_private; |
5443 | struct drm_i915_private *dev_priv = dev->dev_private; |
5474 | struct intel_encoder *encoder; |
5444 | struct intel_encoder *encoder; |
5475 | struct drm_mode_config *mode_config = &dev->mode_config; |
5445 | struct drm_mode_config *mode_config = &dev->mode_config; |
5476 | struct intel_encoder *edp_encoder = NULL; |
5446 | struct intel_encoder *edp_encoder = NULL; |
5477 | int num_connectors = 0; |
5447 | int num_connectors = 0; |
5478 | bool is_lvds = false; |
5448 | bool is_lvds = false; |
5479 | 5449 | ||
5480 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
5450 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
5481 | if (encoder->base.crtc != crtc) |
5451 | if (encoder->base.crtc != crtc) |
5482 | continue; |
5452 | continue; |
5483 | 5453 | ||
5484 | switch (encoder->type) { |
5454 | switch (encoder->type) { |
5485 | case INTEL_OUTPUT_LVDS: |
5455 | case INTEL_OUTPUT_LVDS: |
5486 | is_lvds = true; |
5456 | is_lvds = true; |
5487 | break; |
5457 | break; |
5488 | case INTEL_OUTPUT_EDP: |
5458 | case INTEL_OUTPUT_EDP: |
5489 | edp_encoder = encoder; |
5459 | edp_encoder = encoder; |
5490 | break; |
5460 | break; |
5491 | } |
5461 | } |
5492 | num_connectors++; |
5462 | num_connectors++; |
5493 | } |
5463 | } |
5494 | 5464 | ||
5495 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
5465 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
5496 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
5466 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
5497 | dev_priv->lvds_ssc_freq); |
5467 | dev_priv->lvds_ssc_freq); |
5498 | return dev_priv->lvds_ssc_freq * 1000; |
5468 | return dev_priv->lvds_ssc_freq * 1000; |
5499 | } |
5469 | } |
5500 | 5470 | ||
5501 | return 120000; |
5471 | return 120000; |
5502 | } |
5472 | } |
5503 | 5473 | ||
5504 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
5474 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
5505 | struct drm_display_mode *mode, |
5475 | struct drm_display_mode *mode, |
5506 | struct drm_display_mode *adjusted_mode, |
5476 | struct drm_display_mode *adjusted_mode, |
5507 | int x, int y, |
5477 | int x, int y, |
5508 | struct drm_framebuffer *old_fb) |
5478 | struct drm_framebuffer *old_fb) |
5509 | { |
5479 | { |
5510 | struct drm_device *dev = crtc->dev; |
5480 | struct drm_device *dev = crtc->dev; |
5511 | struct drm_i915_private *dev_priv = dev->dev_private; |
5481 | struct drm_i915_private *dev_priv = dev->dev_private; |
5512 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5482 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5513 | int pipe = intel_crtc->pipe; |
5483 | int pipe = intel_crtc->pipe; |
5514 | int plane = intel_crtc->plane; |
5484 | int plane = intel_crtc->plane; |
5515 | int refclk, num_connectors = 0; |
5485 | int refclk, num_connectors = 0; |
5516 | intel_clock_t clock, reduced_clock; |
5486 | intel_clock_t clock, reduced_clock; |
5517 | u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; |
5487 | u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; |
5518 | bool ok, has_reduced_clock = false, is_sdvo = false; |
5488 | bool ok, has_reduced_clock = false, is_sdvo = false; |
5519 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
5489 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
5520 | struct intel_encoder *has_edp_encoder = NULL; |
5490 | struct intel_encoder *has_edp_encoder = NULL; |
5521 | struct drm_mode_config *mode_config = &dev->mode_config; |
5491 | struct drm_mode_config *mode_config = &dev->mode_config; |
5522 | struct intel_encoder *encoder; |
5492 | struct intel_encoder *encoder; |
5523 | const intel_limit_t *limit; |
5493 | const intel_limit_t *limit; |
5524 | int ret; |
5494 | int ret; |
5525 | struct fdi_m_n m_n = {0}; |
5495 | struct fdi_m_n m_n = {0}; |
5526 | u32 temp; |
5496 | u32 temp; |
5527 | u32 lvds_sync = 0; |
5497 | u32 lvds_sync = 0; |
5528 | int target_clock, pixel_multiplier, lane, link_bw, factor; |
5498 | int target_clock, pixel_multiplier, lane, link_bw, factor; |
5529 | unsigned int pipe_bpp; |
5499 | unsigned int pipe_bpp; |
5530 | bool dither; |
5500 | bool dither; |
5531 | 5501 | ||
5532 | ENTER(); |
5502 | ENTER(); |
5533 | 5503 | ||
5534 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
5504 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
5535 | if (encoder->base.crtc != crtc) |
5505 | if (encoder->base.crtc != crtc) |
5536 | continue; |
5506 | continue; |
5537 | 5507 | ||
5538 | switch (encoder->type) { |
5508 | switch (encoder->type) { |
5539 | case INTEL_OUTPUT_LVDS: |
5509 | case INTEL_OUTPUT_LVDS: |
5540 | is_lvds = true; |
5510 | is_lvds = true; |
5541 | break; |
5511 | break; |
5542 | case INTEL_OUTPUT_SDVO: |
5512 | case INTEL_OUTPUT_SDVO: |
5543 | case INTEL_OUTPUT_HDMI: |
5513 | case INTEL_OUTPUT_HDMI: |
5544 | is_sdvo = true; |
5514 | is_sdvo = true; |
5545 | if (encoder->needs_tv_clock) |
5515 | if (encoder->needs_tv_clock) |
5546 | is_tv = true; |
5516 | is_tv = true; |
5547 | break; |
5517 | break; |
5548 | case INTEL_OUTPUT_TVOUT: |
5518 | case INTEL_OUTPUT_TVOUT: |
5549 | is_tv = true; |
5519 | is_tv = true; |
5550 | break; |
5520 | break; |
5551 | case INTEL_OUTPUT_ANALOG: |
5521 | case INTEL_OUTPUT_ANALOG: |
5552 | is_crt = true; |
5522 | is_crt = true; |
5553 | break; |
5523 | break; |
5554 | case INTEL_OUTPUT_DISPLAYPORT: |
5524 | case INTEL_OUTPUT_DISPLAYPORT: |
5555 | is_dp = true; |
5525 | is_dp = true; |
5556 | break; |
5526 | break; |
5557 | case INTEL_OUTPUT_EDP: |
5527 | case INTEL_OUTPUT_EDP: |
5558 | has_edp_encoder = encoder; |
5528 | has_edp_encoder = encoder; |
5559 | break; |
5529 | break; |
5560 | } |
5530 | } |
5561 | 5531 | ||
5562 | num_connectors++; |
5532 | num_connectors++; |
5563 | } |
5533 | } |
5564 | 5534 | ||
5565 | refclk = ironlake_get_refclk(crtc); |
5535 | refclk = ironlake_get_refclk(crtc); |
5566 | 5536 | ||
5567 | /* |
5537 | /* |
5568 | * Returns a set of divisors for the desired target clock with the given |
5538 | * Returns a set of divisors for the desired target clock with the given |
5569 | * refclk, or FALSE. The returned values represent the clock equation: |
5539 | * refclk, or FALSE. The returned values represent the clock equation: |
5570 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
5540 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. |
5571 | */ |
5541 | */ |
5572 | limit = intel_limit(crtc, refclk); |
5542 | limit = intel_limit(crtc, refclk); |
5573 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); |
5543 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); |
5574 | if (!ok) { |
5544 | if (!ok) { |
5575 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
5545 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
5576 | return -EINVAL; |
5546 | return -EINVAL; |
5577 | } |
5547 | } |
5578 | 5548 | ||
5579 | /* Ensure that the cursor is valid for the new mode before changing... */ |
5549 | /* Ensure that the cursor is valid for the new mode before changing... */ |
5580 | // intel_crtc_update_cursor(crtc, true); |
5550 | // intel_crtc_update_cursor(crtc, true); |
5581 | 5551 | ||
5582 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
5552 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
5583 | has_reduced_clock = limit->find_pll(limit, crtc, |
5553 | has_reduced_clock = limit->find_pll(limit, crtc, |
5584 | dev_priv->lvds_downclock, |
5554 | dev_priv->lvds_downclock, |
5585 | refclk, |
5555 | refclk, |
5586 | &reduced_clock); |
5556 | &reduced_clock); |
5587 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { |
5557 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { |
5588 | /* |
5558 | /* |
5589 | * If the different P is found, it means that we can't |
5559 | * If the different P is found, it means that we can't |
5590 | * switch the display clock by using the FP0/FP1. |
5560 | * switch the display clock by using the FP0/FP1. |
5591 | * In such case we will disable the LVDS downclock |
5561 | * In such case we will disable the LVDS downclock |
5592 | * feature. |
5562 | * feature. |
5593 | */ |
5563 | */ |
5594 | DRM_DEBUG_KMS("Different P is found for " |
5564 | DRM_DEBUG_KMS("Different P is found for " |
5595 | "LVDS clock/downclock\n"); |
5565 | "LVDS clock/downclock\n"); |
5596 | has_reduced_clock = 0; |
5566 | has_reduced_clock = 0; |
5597 | } |
5567 | } |
5598 | } |
5568 | } |
5599 | /* SDVO TV has fixed PLL values depend on its clock range, |
5569 | /* SDVO TV has fixed PLL values depend on its clock range, |
5600 | this mirrors vbios setting. */ |
5570 | this mirrors vbios setting. */ |
5601 | if (is_sdvo && is_tv) { |
5571 | if (is_sdvo && is_tv) { |
5602 | if (adjusted_mode->clock >= 100000 |
5572 | if (adjusted_mode->clock >= 100000 |
5603 | && adjusted_mode->clock < 140500) { |
5573 | && adjusted_mode->clock < 140500) { |
5604 | clock.p1 = 2; |
5574 | clock.p1 = 2; |
5605 | clock.p2 = 10; |
5575 | clock.p2 = 10; |
5606 | clock.n = 3; |
5576 | clock.n = 3; |
5607 | clock.m1 = 16; |
5577 | clock.m1 = 16; |
5608 | clock.m2 = 8; |
5578 | clock.m2 = 8; |
5609 | } else if (adjusted_mode->clock >= 140500 |
5579 | } else if (adjusted_mode->clock >= 140500 |
5610 | && adjusted_mode->clock <= 200000) { |
5580 | && adjusted_mode->clock <= 200000) { |
5611 | clock.p1 = 1; |
5581 | clock.p1 = 1; |
5612 | clock.p2 = 10; |
5582 | clock.p2 = 10; |
5613 | clock.n = 6; |
5583 | clock.n = 6; |
5614 | clock.m1 = 12; |
5584 | clock.m1 = 12; |
5615 | clock.m2 = 8; |
5585 | clock.m2 = 8; |
5616 | } |
5586 | } |
5617 | } |
5587 | } |
5618 | 5588 | ||
5619 | /* FDI link */ |
5589 | /* FDI link */ |
5620 | pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
5590 | pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
5621 | lane = 0; |
5591 | lane = 0; |
5622 | /* CPU eDP doesn't require FDI link, so just set DP M/N |
5592 | /* CPU eDP doesn't require FDI link, so just set DP M/N |
5623 | according to current link config */ |
5593 | according to current link config */ |
5624 | if (has_edp_encoder && |
5594 | if (has_edp_encoder && |
5625 | !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5595 | !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5626 | target_clock = mode->clock; |
5596 | target_clock = mode->clock; |
5627 | intel_edp_link_config(has_edp_encoder, |
5597 | intel_edp_link_config(has_edp_encoder, |
5628 | &lane, &link_bw); |
5598 | &lane, &link_bw); |
5629 | } else { |
5599 | } else { |
5630 | /* [e]DP over FDI requires target mode clock |
5600 | /* [e]DP over FDI requires target mode clock |
5631 | instead of link clock */ |
5601 | instead of link clock */ |
5632 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) |
5602 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) |
5633 | target_clock = mode->clock; |
5603 | target_clock = mode->clock; |
5634 | else |
5604 | else |
5635 | target_clock = adjusted_mode->clock; |
5605 | target_clock = adjusted_mode->clock; |
5636 | 5606 | ||
5637 | /* FDI is a binary signal running at ~2.7GHz, encoding |
5607 | /* FDI is a binary signal running at ~2.7GHz, encoding |
5638 | * each output octet as 10 bits. The actual frequency |
5608 | * each output octet as 10 bits. The actual frequency |
5639 | * is stored as a divider into a 100MHz clock, and the |
5609 | * is stored as a divider into a 100MHz clock, and the |
5640 | * mode pixel clock is stored in units of 1KHz. |
5610 | * mode pixel clock is stored in units of 1KHz. |
5641 | * Hence the bw of each lane in terms of the mode signal |
5611 | * Hence the bw of each lane in terms of the mode signal |
5642 | * is: |
5612 | * is: |
5643 | */ |
5613 | */ |
5644 | link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; |
5614 | link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; |
5645 | } |
5615 | } |
5646 | 5616 | ||
5647 | /* determine panel color depth */ |
5617 | /* determine panel color depth */ |
5648 | temp = I915_READ(PIPECONF(pipe)); |
5618 | temp = I915_READ(PIPECONF(pipe)); |
5649 | temp &= ~PIPE_BPC_MASK; |
5619 | temp &= ~PIPE_BPC_MASK; |
5650 | dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode); |
5620 | dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode); |
5651 | switch (pipe_bpp) { |
5621 | switch (pipe_bpp) { |
5652 | case 18: |
5622 | case 18: |
5653 | temp |= PIPE_6BPC; |
5623 | temp |= PIPE_6BPC; |
5654 | break; |
5624 | break; |
5655 | case 24: |
5625 | case 24: |
5656 | temp |= PIPE_8BPC; |
5626 | temp |= PIPE_8BPC; |
5657 | break; |
5627 | break; |
5658 | case 30: |
5628 | case 30: |
5659 | temp |= PIPE_10BPC; |
5629 | temp |= PIPE_10BPC; |
5660 | break; |
5630 | break; |
5661 | case 36: |
5631 | case 36: |
5662 | temp |= PIPE_12BPC; |
5632 | temp |= PIPE_12BPC; |
5663 | break; |
5633 | break; |
5664 | default: |
5634 | default: |
5665 | WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", |
5635 | WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", |
5666 | pipe_bpp); |
5636 | pipe_bpp); |
5667 | temp |= PIPE_8BPC; |
5637 | temp |= PIPE_8BPC; |
5668 | pipe_bpp = 24; |
5638 | pipe_bpp = 24; |
5669 | break; |
5639 | break; |
5670 | } |
5640 | } |
5671 | 5641 | ||
5672 | intel_crtc->bpp = pipe_bpp; |
5642 | intel_crtc->bpp = pipe_bpp; |
5673 | I915_WRITE(PIPECONF(pipe), temp); |
5643 | I915_WRITE(PIPECONF(pipe), temp); |
5674 | 5644 | ||
5675 | if (!lane) { |
5645 | if (!lane) { |
5676 | /* |
5646 | /* |
5677 | * Account for spread spectrum to avoid |
5647 | * Account for spread spectrum to avoid |
5678 | * oversubscribing the link. Max center spread |
5648 | * oversubscribing the link. Max center spread |
5679 | * is 2.5%; use 5% for safety's sake. |
5649 | * is 2.5%; use 5% for safety's sake. |
5680 | */ |
5650 | */ |
5681 | u32 bps = target_clock * intel_crtc->bpp * 21 / 20; |
5651 | u32 bps = target_clock * intel_crtc->bpp * 21 / 20; |
5682 | lane = bps / (link_bw * 8) + 1; |
5652 | lane = bps / (link_bw * 8) + 1; |
5683 | } |
5653 | } |
5684 | 5654 | ||
5685 | intel_crtc->fdi_lanes = lane; |
5655 | intel_crtc->fdi_lanes = lane; |
5686 | 5656 | ||
5687 | if (pixel_multiplier > 1) |
5657 | if (pixel_multiplier > 1) |
5688 | link_bw *= pixel_multiplier; |
5658 | link_bw *= pixel_multiplier; |
5689 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, |
5659 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, |
5690 | &m_n); |
5660 | &m_n); |
5691 | 5661 | ||
5692 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
5662 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
5693 | if (has_reduced_clock) |
5663 | if (has_reduced_clock) |
5694 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
5664 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
5695 | reduced_clock.m2; |
5665 | reduced_clock.m2; |
5696 | 5666 | ||
5697 | /* Enable autotuning of the PLL clock (if permissible) */ |
5667 | /* Enable autotuning of the PLL clock (if permissible) */ |
5698 | factor = 21; |
5668 | factor = 21; |
5699 | if (is_lvds) { |
5669 | if (is_lvds) { |
5700 | if ((intel_panel_use_ssc(dev_priv) && |
5670 | if ((intel_panel_use_ssc(dev_priv) && |
5701 | dev_priv->lvds_ssc_freq == 100) || |
5671 | dev_priv->lvds_ssc_freq == 100) || |
5702 | (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) |
5672 | (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) |
5703 | factor = 25; |
5673 | factor = 25; |
5704 | } else if (is_sdvo && is_tv) |
5674 | } else if (is_sdvo && is_tv) |
5705 | factor = 20; |
5675 | factor = 20; |
5706 | 5676 | ||
5707 | if (clock.m < factor * clock.n) |
5677 | if (clock.m < factor * clock.n) |
5708 | fp |= FP_CB_TUNE; |
5678 | fp |= FP_CB_TUNE; |
5709 | 5679 | ||
5710 | dpll = 0; |
5680 | dpll = 0; |
5711 | 5681 | ||
5712 | if (is_lvds) |
5682 | if (is_lvds) |
5713 | dpll |= DPLLB_MODE_LVDS; |
5683 | dpll |= DPLLB_MODE_LVDS; |
5714 | else |
5684 | else |
5715 | dpll |= DPLLB_MODE_DAC_SERIAL; |
5685 | dpll |= DPLLB_MODE_DAC_SERIAL; |
5716 | if (is_sdvo) { |
5686 | if (is_sdvo) { |
5717 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
5687 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
5718 | if (pixel_multiplier > 1) { |
5688 | if (pixel_multiplier > 1) { |
5719 | dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; |
5689 | dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; |
5720 | } |
5690 | } |
5721 | dpll |= DPLL_DVO_HIGH_SPEED; |
5691 | dpll |= DPLL_DVO_HIGH_SPEED; |
5722 | } |
5692 | } |
5723 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) |
5693 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) |
5724 | dpll |= DPLL_DVO_HIGH_SPEED; |
5694 | dpll |= DPLL_DVO_HIGH_SPEED; |
5725 | 5695 | ||
5726 | /* compute bitmask from p1 value */ |
5696 | /* compute bitmask from p1 value */ |
5727 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
5697 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
5728 | /* also FPA1 */ |
5698 | /* also FPA1 */ |
5729 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
5699 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; |
5730 | 5700 | ||
5731 | switch (clock.p2) { |
5701 | switch (clock.p2) { |
5732 | case 5: |
5702 | case 5: |
5733 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; |
5703 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; |
5734 | break; |
5704 | break; |
5735 | case 7: |
5705 | case 7: |
5736 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; |
5706 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; |
5737 | break; |
5707 | break; |
5738 | case 10: |
5708 | case 10: |
5739 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; |
5709 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; |
5740 | break; |
5710 | break; |
5741 | case 14: |
5711 | case 14: |
5742 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; |
5712 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; |
5743 | break; |
5713 | break; |
5744 | } |
5714 | } |
5745 | 5715 | ||
5746 | if (is_sdvo && is_tv) |
5716 | if (is_sdvo && is_tv) |
5747 | dpll |= PLL_REF_INPUT_TVCLKINBC; |
5717 | dpll |= PLL_REF_INPUT_TVCLKINBC; |
5748 | else if (is_tv) |
5718 | else if (is_tv) |
5749 | /* XXX: just matching BIOS for now */ |
5719 | /* XXX: just matching BIOS for now */ |
5750 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
5720 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
5751 | dpll |= 3; |
5721 | dpll |= 3; |
5752 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
5722 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
5753 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
5723 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
5754 | else |
5724 | else |
5755 | dpll |= PLL_REF_INPUT_DREFCLK; |
5725 | dpll |= PLL_REF_INPUT_DREFCLK; |
5756 | 5726 | ||
5757 | /* setup pipeconf */ |
5727 | /* setup pipeconf */ |
5758 | pipeconf = I915_READ(PIPECONF(pipe)); |
5728 | pipeconf = I915_READ(PIPECONF(pipe)); |
5759 | 5729 | ||
5760 | /* Set up the display plane register */ |
5730 | /* Set up the display plane register */ |
5761 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
5731 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
5762 | 5732 | ||
5763 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
5733 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
5764 | drm_mode_debug_printmodeline(mode); |
5734 | drm_mode_debug_printmodeline(mode); |
5765 | 5735 | ||
5766 | /* PCH eDP needs FDI, but CPU eDP does not */ |
5736 | /* PCH eDP needs FDI, but CPU eDP does not */ |
5767 | if (!intel_crtc->no_pll) { |
5737 | if (!intel_crtc->no_pll) { |
5768 | if (!has_edp_encoder || |
5738 | if (!has_edp_encoder || |
5769 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5739 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5770 | I915_WRITE(PCH_FP0(pipe), fp); |
5740 | I915_WRITE(PCH_FP0(pipe), fp); |
5771 | I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); |
5741 | I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); |
5772 | 5742 | ||
5773 | POSTING_READ(PCH_DPLL(pipe)); |
5743 | POSTING_READ(PCH_DPLL(pipe)); |
5774 | udelay(150); |
5744 | udelay(150); |
5775 | } |
5745 | } |
5776 | } else { |
5746 | } else { |
5777 | if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) && |
5747 | if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) && |
5778 | fp == I915_READ(PCH_FP0(0))) { |
5748 | fp == I915_READ(PCH_FP0(0))) { |
5779 | intel_crtc->use_pll_a = true; |
5749 | intel_crtc->use_pll_a = true; |
5780 | DRM_DEBUG_KMS("using pipe a dpll\n"); |
5750 | DRM_DEBUG_KMS("using pipe a dpll\n"); |
5781 | } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) && |
5751 | } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) && |
5782 | fp == I915_READ(PCH_FP0(1))) { |
5752 | fp == I915_READ(PCH_FP0(1))) { |
5783 | intel_crtc->use_pll_a = false; |
5753 | intel_crtc->use_pll_a = false; |
5784 | DRM_DEBUG_KMS("using pipe b dpll\n"); |
5754 | DRM_DEBUG_KMS("using pipe b dpll\n"); |
5785 | } else { |
5755 | } else { |
5786 | DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n"); |
5756 | DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n"); |
5787 | return -EINVAL; |
5757 | return -EINVAL; |
5788 | } |
5758 | } |
5789 | } |
5759 | } |
5790 | 5760 | ||
5791 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
5761 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
5792 | * This is an exception to the general rule that mode_set doesn't turn |
5762 | * This is an exception to the general rule that mode_set doesn't turn |
5793 | * things on. |
5763 | * things on. |
5794 | */ |
5764 | */ |
5795 | if (is_lvds) { |
5765 | if (is_lvds) { |
5796 | temp = I915_READ(PCH_LVDS); |
5766 | temp = I915_READ(PCH_LVDS); |
5797 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
5767 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
5798 | if (HAS_PCH_CPT(dev)) { |
5768 | if (HAS_PCH_CPT(dev)) { |
5799 | temp &= ~PORT_TRANS_SEL_MASK; |
5769 | temp &= ~PORT_TRANS_SEL_MASK; |
5800 | temp |= PORT_TRANS_SEL_CPT(pipe); |
5770 | temp |= PORT_TRANS_SEL_CPT(pipe); |
5801 | } else { |
5771 | } else { |
5802 | if (pipe == 1) |
5772 | if (pipe == 1) |
5803 | temp |= LVDS_PIPEB_SELECT; |
5773 | temp |= LVDS_PIPEB_SELECT; |
5804 | else |
5774 | else |
5805 | temp &= ~LVDS_PIPEB_SELECT; |
5775 | temp &= ~LVDS_PIPEB_SELECT; |
5806 | } |
5776 | } |
5807 | 5777 | ||
5808 | /* set the corresponsding LVDS_BORDER bit */ |
5778 | /* set the corresponsding LVDS_BORDER bit */ |
5809 | temp |= dev_priv->lvds_border_bits; |
5779 | temp |= dev_priv->lvds_border_bits; |
5810 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
5780 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
5811 | * set the DPLLs for dual-channel mode or not. |
5781 | * set the DPLLs for dual-channel mode or not. |
5812 | */ |
5782 | */ |
5813 | if (clock.p2 == 7) |
5783 | if (clock.p2 == 7) |
5814 | temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
5784 | temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
5815 | else |
5785 | else |
5816 | temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); |
5786 | temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); |
5817 | 5787 | ||
5818 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) |
5788 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) |
5819 | * appropriately here, but we need to look more thoroughly into how |
5789 | * appropriately here, but we need to look more thoroughly into how |
5820 | * panels behave in the two modes. |
5790 | * panels behave in the two modes. |
5821 | */ |
5791 | */ |
5822 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
5792 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
5823 | lvds_sync |= LVDS_HSYNC_POLARITY; |
5793 | lvds_sync |= LVDS_HSYNC_POLARITY; |
5824 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) |
5794 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) |
5825 | lvds_sync |= LVDS_VSYNC_POLARITY; |
5795 | lvds_sync |= LVDS_VSYNC_POLARITY; |
5826 | if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) |
5796 | if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) |
5827 | != lvds_sync) { |
5797 | != lvds_sync) { |
5828 | char flags[2] = "-+"; |
5798 | char flags[2] = "-+"; |
5829 | DRM_INFO("Changing LVDS panel from " |
5799 | DRM_INFO("Changing LVDS panel from " |
5830 | "(%chsync, %cvsync) to (%chsync, %cvsync)\n", |
5800 | "(%chsync, %cvsync) to (%chsync, %cvsync)\n", |
5831 | flags[!(temp & LVDS_HSYNC_POLARITY)], |
5801 | flags[!(temp & LVDS_HSYNC_POLARITY)], |
5832 | flags[!(temp & LVDS_VSYNC_POLARITY)], |
5802 | flags[!(temp & LVDS_VSYNC_POLARITY)], |
5833 | flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], |
5803 | flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], |
5834 | flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); |
5804 | flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); |
5835 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); |
5805 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); |
5836 | temp |= lvds_sync; |
5806 | temp |= lvds_sync; |
5837 | } |
5807 | } |
5838 | I915_WRITE(PCH_LVDS, temp); |
5808 | I915_WRITE(PCH_LVDS, temp); |
5839 | } |
5809 | } |
5840 | 5810 | ||
5841 | pipeconf &= ~PIPECONF_DITHER_EN; |
5811 | pipeconf &= ~PIPECONF_DITHER_EN; |
5842 | pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; |
5812 | pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; |
5843 | if ((is_lvds && dev_priv->lvds_dither) || dither) { |
5813 | if ((is_lvds && dev_priv->lvds_dither) || dither) { |
5844 | pipeconf |= PIPECONF_DITHER_EN; |
5814 | pipeconf |= PIPECONF_DITHER_EN; |
5845 | pipeconf |= PIPECONF_DITHER_TYPE_SP; |
5815 | pipeconf |= PIPECONF_DITHER_TYPE_SP; |
5846 | } |
5816 | } |
5847 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5817 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5848 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
5818 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
5849 | } else { |
5819 | } else { |
5850 | /* For non-DP output, clear any trans DP clock recovery setting.*/ |
5820 | /* For non-DP output, clear any trans DP clock recovery setting.*/ |
5851 | I915_WRITE(TRANSDATA_M1(pipe), 0); |
5821 | I915_WRITE(TRANSDATA_M1(pipe), 0); |
5852 | I915_WRITE(TRANSDATA_N1(pipe), 0); |
5822 | I915_WRITE(TRANSDATA_N1(pipe), 0); |
5853 | I915_WRITE(TRANSDPLINK_M1(pipe), 0); |
5823 | I915_WRITE(TRANSDPLINK_M1(pipe), 0); |
5854 | I915_WRITE(TRANSDPLINK_N1(pipe), 0); |
5824 | I915_WRITE(TRANSDPLINK_N1(pipe), 0); |
5855 | } |
5825 | } |
5856 | 5826 | ||
5857 | if (!intel_crtc->no_pll && |
5827 | if (!intel_crtc->no_pll && |
5858 | (!has_edp_encoder || |
5828 | (!has_edp_encoder || |
5859 | intel_encoder_is_pch_edp(&has_edp_encoder->base))) { |
5829 | intel_encoder_is_pch_edp(&has_edp_encoder->base))) { |
5860 | I915_WRITE(PCH_DPLL(pipe), dpll); |
5830 | I915_WRITE(PCH_DPLL(pipe), dpll); |
5861 | 5831 | ||
5862 | /* Wait for the clocks to stabilize. */ |
5832 | /* Wait for the clocks to stabilize. */ |
5863 | POSTING_READ(PCH_DPLL(pipe)); |
5833 | POSTING_READ(PCH_DPLL(pipe)); |
5864 | udelay(150); |
5834 | udelay(150); |
5865 | 5835 | ||
5866 | /* The pixel multiplier can only be updated once the |
5836 | /* The pixel multiplier can only be updated once the |
5867 | * DPLL is enabled and the clocks are stable. |
5837 | * DPLL is enabled and the clocks are stable. |
5868 | * |
5838 | * |
5869 | * So write it again. |
5839 | * So write it again. |
5870 | */ |
5840 | */ |
5871 | I915_WRITE(PCH_DPLL(pipe), dpll); |
5841 | I915_WRITE(PCH_DPLL(pipe), dpll); |
5872 | } |
5842 | } |
5873 | 5843 | ||
5874 | intel_crtc->lowfreq_avail = false; |
5844 | intel_crtc->lowfreq_avail = false; |
5875 | if (!intel_crtc->no_pll) { |
5845 | if (!intel_crtc->no_pll) { |
5876 | if (is_lvds && has_reduced_clock && i915_powersave) { |
5846 | if (is_lvds && has_reduced_clock && i915_powersave) { |
5877 | I915_WRITE(PCH_FP1(pipe), fp2); |
5847 | I915_WRITE(PCH_FP1(pipe), fp2); |
5878 | intel_crtc->lowfreq_avail = true; |
5848 | intel_crtc->lowfreq_avail = true; |
5879 | if (HAS_PIPE_CXSR(dev)) { |
5849 | if (HAS_PIPE_CXSR(dev)) { |
5880 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
5850 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
5881 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
5851 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
5882 | } |
5852 | } |
5883 | } else { |
5853 | } else { |
5884 | I915_WRITE(PCH_FP1(pipe), fp); |
5854 | I915_WRITE(PCH_FP1(pipe), fp); |
5885 | if (HAS_PIPE_CXSR(dev)) { |
5855 | if (HAS_PIPE_CXSR(dev)) { |
5886 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
5856 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
5887 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; |
5857 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; |
5888 | } |
5858 | } |
5889 | } |
5859 | } |
5890 | } |
5860 | } |
- | 5861 | ||
5891 | 5862 | pipeconf &= ~PIPECONF_INTERLACE_MASK; |
|
5892 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
5863 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
5893 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; |
5864 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; |
5894 | /* the chip adds 2 halflines automatically */ |
5865 | /* the chip adds 2 halflines automatically */ |
5895 | adjusted_mode->crtc_vdisplay -= 1; |
5866 | adjusted_mode->crtc_vdisplay -= 1; |
5896 | adjusted_mode->crtc_vtotal -= 1; |
5867 | adjusted_mode->crtc_vtotal -= 1; |
5897 | adjusted_mode->crtc_vblank_start -= 1; |
5868 | adjusted_mode->crtc_vblank_start -= 1; |
5898 | adjusted_mode->crtc_vblank_end -= 1; |
5869 | adjusted_mode->crtc_vblank_end -= 1; |
5899 | adjusted_mode->crtc_vsync_end -= 1; |
5870 | adjusted_mode->crtc_vsync_end -= 1; |
5900 | adjusted_mode->crtc_vsync_start -= 1; |
5871 | adjusted_mode->crtc_vsync_start -= 1; |
5901 | } else |
5872 | } else |
5902 | pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ |
5873 | pipeconf |= PIPECONF_PROGRESSIVE; |
5903 | 5874 | ||
5904 | I915_WRITE(HTOTAL(pipe), |
5875 | I915_WRITE(HTOTAL(pipe), |
5905 | (adjusted_mode->crtc_hdisplay - 1) | |
5876 | (adjusted_mode->crtc_hdisplay - 1) | |
5906 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
5877 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
5907 | I915_WRITE(HBLANK(pipe), |
5878 | I915_WRITE(HBLANK(pipe), |
5908 | (adjusted_mode->crtc_hblank_start - 1) | |
5879 | (adjusted_mode->crtc_hblank_start - 1) | |
5909 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); |
5880 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); |
5910 | I915_WRITE(HSYNC(pipe), |
5881 | I915_WRITE(HSYNC(pipe), |
5911 | (adjusted_mode->crtc_hsync_start - 1) | |
5882 | (adjusted_mode->crtc_hsync_start - 1) | |
5912 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); |
5883 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); |
5913 | 5884 | ||
5914 | I915_WRITE(VTOTAL(pipe), |
5885 | I915_WRITE(VTOTAL(pipe), |
5915 | (adjusted_mode->crtc_vdisplay - 1) | |
5886 | (adjusted_mode->crtc_vdisplay - 1) | |
5916 | ((adjusted_mode->crtc_vtotal - 1) << 16)); |
5887 | ((adjusted_mode->crtc_vtotal - 1) << 16)); |
5917 | I915_WRITE(VBLANK(pipe), |
5888 | I915_WRITE(VBLANK(pipe), |
5918 | (adjusted_mode->crtc_vblank_start - 1) | |
5889 | (adjusted_mode->crtc_vblank_start - 1) | |
5919 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); |
5890 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); |
5920 | I915_WRITE(VSYNC(pipe), |
5891 | I915_WRITE(VSYNC(pipe), |
5921 | (adjusted_mode->crtc_vsync_start - 1) | |
5892 | (adjusted_mode->crtc_vsync_start - 1) | |
5922 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); |
5893 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); |
5923 | 5894 | ||
5924 | /* pipesrc controls the size that is scaled from, which should |
5895 | /* pipesrc controls the size that is scaled from, which should |
5925 | * always be the user's requested size. |
5896 | * always be the user's requested size. |
5926 | */ |
5897 | */ |
5927 | I915_WRITE(PIPESRC(pipe), |
5898 | I915_WRITE(PIPESRC(pipe), |
5928 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); |
5899 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); |
5929 | 5900 | ||
5930 | I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
5901 | I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
5931 | I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); |
5902 | I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); |
5932 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); |
5903 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); |
5933 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); |
5904 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); |
5934 | 5905 | ||
5935 | if (has_edp_encoder && |
5906 | if (has_edp_encoder && |
5936 | !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5907 | !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5937 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
5908 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
5938 | } |
5909 | } |
5939 | 5910 | ||
5940 | I915_WRITE(PIPECONF(pipe), pipeconf); |
5911 | I915_WRITE(PIPECONF(pipe), pipeconf); |
5941 | POSTING_READ(PIPECONF(pipe)); |
5912 | POSTING_READ(PIPECONF(pipe)); |
5942 | 5913 | ||
5943 | intel_wait_for_vblank(dev, pipe); |
5914 | intel_wait_for_vblank(dev, pipe); |
5944 | 5915 | ||
5945 | if (IS_GEN5(dev)) { |
5916 | if (IS_GEN5(dev)) { |
5946 | /* enable address swizzle for tiling buffer */ |
5917 | /* enable address swizzle for tiling buffer */ |
5947 | temp = I915_READ(DISP_ARB_CTL); |
5918 | temp = I915_READ(DISP_ARB_CTL); |
5948 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); |
5919 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); |
5949 | } |
5920 | } |
5950 | 5921 | ||
5951 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5922 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5952 | POSTING_READ(DSPCNTR(plane)); |
5923 | POSTING_READ(DSPCNTR(plane)); |
5953 | 5924 | ||
5954 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
5925 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
5955 | 5926 | ||
5956 | dbgprintf("Set base\n"); |
5927 | dbgprintf("Set base\n"); |
5957 | 5928 | ||
5958 | intel_update_watermarks(dev); |
5929 | intel_update_watermarks(dev); |
5959 | 5930 | ||
5960 | LEAVE(); |
5931 | LEAVE(); |
5961 | 5932 | ||
5962 | return ret; |
5933 | return ret; |
5963 | } |
5934 | } |
5964 | 5935 | ||
5965 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
5936 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
5966 | struct drm_display_mode *mode, |
5937 | struct drm_display_mode *mode, |
5967 | struct drm_display_mode *adjusted_mode, |
5938 | struct drm_display_mode *adjusted_mode, |
5968 | int x, int y, |
5939 | int x, int y, |
5969 | struct drm_framebuffer *old_fb) |
5940 | struct drm_framebuffer *old_fb) |
5970 | { |
5941 | { |
5971 | struct drm_device *dev = crtc->dev; |
5942 | struct drm_device *dev = crtc->dev; |
5972 | struct drm_i915_private *dev_priv = dev->dev_private; |
5943 | struct drm_i915_private *dev_priv = dev->dev_private; |
5973 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5944 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5974 | int pipe = intel_crtc->pipe; |
5945 | int pipe = intel_crtc->pipe; |
5975 | int ret; |
5946 | int ret; |
5976 | 5947 | ||
5977 | // drm_vblank_pre_modeset(dev, pipe); |
5948 | // drm_vblank_pre_modeset(dev, pipe); |
5978 | ENTER(); |
5949 | ENTER(); |
5979 | 5950 | ||
5980 | ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, |
5951 | ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, |
5981 | x, y, old_fb); |
5952 | x, y, old_fb); |
5982 | 5953 | ||
5983 | // drm_vblank_post_modeset(dev, pipe); |
5954 | // drm_vblank_post_modeset(dev, pipe); |
5984 | 5955 | ||
5985 | intel_crtc->dpms_mode = DRM_MODE_DPMS_ON; |
5956 | intel_crtc->dpms_mode = DRM_MODE_DPMS_ON; |
5986 | LEAVE(); |
5957 | LEAVE(); |
5987 | 5958 | ||
5988 | return ret; |
5959 | return ret; |
5989 | } |
5960 | } |
5990 | 5961 | ||
/*
 * Check whether the ELD (EDID-Like Data) already programmed into the
 * audio hardware matches connector->eld, so a rewrite can be skipped.
 *
 * @reg_eldv/@bits_eldv: register and mask holding the "ELD valid" flag.
 * @reg_elda/@bits_elda: register and mask holding the ELD access address;
 *                       the address bits are cleared before reading back.
 * @reg_edid:            data port through which the ELD contents are read.
 *
 * Returns true if the hardware state is already up to date.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* No ELD to program: up to date iff the hardware valid bit is clear. */
	if (!eld[0])
		return !i;

	/* We have an ELD but the hardware one is not marked valid. */
	if (!i)
		return false;

	/* Reset the ELD access address to 0 before reading back. */
	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	/*
	 * Compare DWORD by DWORD; eld[2] is the ELD length in DWORDs
	 * (see its use as a size in g4x_write_eld()).
	 * NOTE(review): each read of reg_edid appears to rely on the
	 * hardware auto-incrementing the ELD read address -- confirm
	 * against the audio register documentation.
	 */
	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
6019 | 5990 | ||
/*
 * Program the connector's ELD (EDID-Like Data) into the G4X-class audio
 * hardware so the audio driver can learn the monitor's audio capabilities.
 * If the hardware copy already matches, nothing is written.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	/* Pick the ELD-valid bit matching the detected audio device ID. */
	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	/* Skip the rewrite if the hardware ELD already matches. */
	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the old ELD and reset the write address to 0. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	/* Nothing to program. */
	if (!eld[0])
		return;

	/* Write at most the hardware buffer size, in DWORDs (eld[2] = ELD
	 * length in DWORDs).
	 * NOTE(review): the write loop appears to rely on the hardware
	 * auto-incrementing the ELD write address after each write to
	 * G4X_HDMIW_HDMIEDID -- confirm against the register documentation.
	 */
	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the newly written ELD as valid. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
6059 | 6030 | ||
/*
 * Write the connector's ELD (EDID-Like Data) into the Ironlake-family audio
 * registers so the HDA audio driver can read the monitor's audio caps.
 *
 * The hardware sequence is order-sensitive:
 *   1. pick the IBX (Ibex Peak) or CPT (Cougar Point) register block,
 *   2. clear the ELD-valid bit(s) for the target port,
 *   3. reset the ELD write address and stream the ELD dwords,
 *   4. set the ELD-valid bit(s) again.
 */
static void ironlake_write_eld(struct drm_connector *connector,
				     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;	/* scratch: pipe index, register value, then loop counter */
	int len;
	int hdmiw_hdmiedid;
	int aud_cntl_st;
	int aud_cntrl_st2;

	/* IBX and CPT PCHs expose the same registers at different offsets */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* per-pipe register instances are 0x100 apart */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		/* port select value 1 = port B, so 'A' + i names the port */
		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
	}

	/* skip the whole dance if the hardware already holds this ELD */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* invalidate the ELD before rewriting it */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	/* an empty ELD means no audio-capable sink: leave it invalid */
	if (!eld[0])
		return;

	/* reset the ELD write address to the start of the buffer */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	/* eld[2] is the ELD length in dwords */
	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* mark the freshly written ELD as valid */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
6132 | 6103 | ||
6133 | void intel_write_eld(struct drm_encoder *encoder, |
6104 | void intel_write_eld(struct drm_encoder *encoder, |
6134 | struct drm_display_mode *mode) |
6105 | struct drm_display_mode *mode) |
6135 | { |
6106 | { |
6136 | struct drm_crtc *crtc = encoder->crtc; |
6107 | struct drm_crtc *crtc = encoder->crtc; |
6137 | struct drm_connector *connector; |
6108 | struct drm_connector *connector; |
6138 | struct drm_device *dev = encoder->dev; |
6109 | struct drm_device *dev = encoder->dev; |
6139 | struct drm_i915_private *dev_priv = dev->dev_private; |
6110 | struct drm_i915_private *dev_priv = dev->dev_private; |
6140 | 6111 | ||
6141 | connector = drm_select_eld(encoder, mode); |
6112 | connector = drm_select_eld(encoder, mode); |
6142 | if (!connector) |
6113 | if (!connector) |
6143 | return; |
6114 | return; |
6144 | 6115 | ||
6145 | DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", |
6116 | DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", |
6146 | connector->base.id, |
6117 | connector->base.id, |
6147 | drm_get_connector_name(connector), |
6118 | drm_get_connector_name(connector), |
6148 | connector->encoder->base.id, |
6119 | connector->encoder->base.id, |
6149 | drm_get_encoder_name(connector->encoder)); |
6120 | drm_get_encoder_name(connector->encoder)); |
6150 | 6121 | ||
6151 | connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; |
6122 | connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; |
6152 | 6123 | ||
6153 | if (dev_priv->display.write_eld) |
6124 | if (dev_priv->display.write_eld) |
6154 | dev_priv->display.write_eld(connector, crtc); |
6125 | dev_priv->display.write_eld(connector, crtc); |
6155 | } |
6126 | } |
6156 | 6127 | ||
6157 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ |
6128 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ |
6158 | void intel_crtc_load_lut(struct drm_crtc *crtc) |
6129 | void intel_crtc_load_lut(struct drm_crtc *crtc) |
6159 | { |
6130 | { |
6160 | struct drm_device *dev = crtc->dev; |
6131 | struct drm_device *dev = crtc->dev; |
6161 | struct drm_i915_private *dev_priv = dev->dev_private; |
6132 | struct drm_i915_private *dev_priv = dev->dev_private; |
6162 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6133 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6163 | int palreg = PALETTE(intel_crtc->pipe); |
6134 | int palreg = PALETTE(intel_crtc->pipe); |
6164 | int i; |
6135 | int i; |
6165 | 6136 | ||
6166 | /* The clocks have to be on to load the palette. */ |
6137 | /* The clocks have to be on to load the palette. */ |
6167 | if (!crtc->enabled) |
6138 | if (!crtc->enabled) |
6168 | return; |
6139 | return; |
6169 | 6140 | ||
6170 | /* use legacy palette for Ironlake */ |
6141 | /* use legacy palette for Ironlake */ |
6171 | if (HAS_PCH_SPLIT(dev)) |
6142 | if (HAS_PCH_SPLIT(dev)) |
6172 | palreg = LGC_PALETTE(intel_crtc->pipe); |
6143 | palreg = LGC_PALETTE(intel_crtc->pipe); |
6173 | 6144 | ||
6174 | for (i = 0; i < 256; i++) { |
6145 | for (i = 0; i < 256; i++) { |
6175 | I915_WRITE(palreg + 4 * i, |
6146 | I915_WRITE(palreg + 4 * i, |
6176 | (intel_crtc->lut_r[i] << 16) | |
6147 | (intel_crtc->lut_r[i] << 16) | |
6177 | (intel_crtc->lut_g[i] << 8) | |
6148 | (intel_crtc->lut_g[i] << 8) | |
6178 | intel_crtc->lut_b[i]); |
6149 | intel_crtc->lut_b[i]); |
6179 | } |
6150 | } |
6180 | } |
6151 | } |
6181 | 6152 | ||
6182 | 6153 | ||
6183 | 6154 | ||
6184 | 6155 | ||
6185 | 6156 | ||
6186 | 6157 | ||
6187 | 6158 | ||
6188 | 6159 | ||
6189 | 6160 | ||
6190 | 6161 | ||
6191 | 6162 | ||
6192 | 6163 | ||
6193 | 6164 | ||
6194 | 6165 | ||
6195 | 6166 | ||
6196 | 6167 | ||
6197 | 6168 | ||
6198 | 6169 | ||
6199 | 6170 | ||
6200 | 6171 | ||
6201 | 6172 | ||
6202 | 6173 | ||
6203 | 6174 | ||
6204 | 6175 | ||
6205 | 6176 | ||
6206 | 6177 | ||
6207 | 6178 | ||
6208 | 6179 | ||
6209 | 6180 | ||
6210 | 6181 | ||
6211 | 6182 | ||
6212 | 6183 | ||
6213 | 6184 | ||
6214 | 6185 | ||
6215 | 6186 | ||
6216 | 6187 | ||
6217 | 6188 | ||
6218 | /** Sets the color ramps on behalf of RandR */ |
6189 | /** Sets the color ramps on behalf of RandR */ |
6219 | void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
6190 | void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
6220 | u16 blue, int regno) |
6191 | u16 blue, int regno) |
6221 | { |
6192 | { |
6222 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6193 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6223 | 6194 | ||
6224 | intel_crtc->lut_r[regno] = red >> 8; |
6195 | intel_crtc->lut_r[regno] = red >> 8; |
6225 | intel_crtc->lut_g[regno] = green >> 8; |
6196 | intel_crtc->lut_g[regno] = green >> 8; |
6226 | intel_crtc->lut_b[regno] = blue >> 8; |
6197 | intel_crtc->lut_b[regno] = blue >> 8; |
6227 | } |
6198 | } |
6228 | 6199 | ||
6229 | void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
6200 | void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
6230 | u16 *blue, int regno) |
6201 | u16 *blue, int regno) |
6231 | { |
6202 | { |
6232 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6203 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6233 | 6204 | ||
6234 | *red = intel_crtc->lut_r[regno] << 8; |
6205 | *red = intel_crtc->lut_r[regno] << 8; |
6235 | *green = intel_crtc->lut_g[regno] << 8; |
6206 | *green = intel_crtc->lut_g[regno] << 8; |
6236 | *blue = intel_crtc->lut_b[regno] << 8; |
6207 | *blue = intel_crtc->lut_b[regno] << 8; |
6237 | } |
6208 | } |
6238 | 6209 | ||
6239 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
6210 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
6240 | u16 *blue, uint32_t start, uint32_t size) |
6211 | u16 *blue, uint32_t start, uint32_t size) |
6241 | { |
6212 | { |
6242 | int end = (start + size > 256) ? 256 : start + size, i; |
6213 | int end = (start + size > 256) ? 256 : start + size, i; |
6243 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6214 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6244 | 6215 | ||
6245 | for (i = start; i < end; i++) { |
6216 | for (i = start; i < end; i++) { |
6246 | intel_crtc->lut_r[i] = red[i] >> 8; |
6217 | intel_crtc->lut_r[i] = red[i] >> 8; |
6247 | intel_crtc->lut_g[i] = green[i] >> 8; |
6218 | intel_crtc->lut_g[i] = green[i] >> 8; |
6248 | intel_crtc->lut_b[i] = blue[i] >> 8; |
6219 | intel_crtc->lut_b[i] = blue[i] >> 8; |
6249 | } |
6220 | } |
6250 | 6221 | ||
6251 | intel_crtc_load_lut(crtc); |
6222 | intel_crtc_load_lut(crtc); |
6252 | } |
6223 | } |
6253 | 6224 | ||
6254 | /** |
6225 | /** |
6255 | * Get a pipe with a simple mode set on it for doing load-based monitor |
6226 | * Get a pipe with a simple mode set on it for doing load-based monitor |
6256 | * detection. |
6227 | * detection. |
6257 | * |
6228 | * |
6258 | * It will be up to the load-detect code to adjust the pipe as appropriate for |
6229 | * It will be up to the load-detect code to adjust the pipe as appropriate for |
6259 | * its requirements. The pipe will be connected to no other encoders. |
6230 | * its requirements. The pipe will be connected to no other encoders. |
6260 | * |
6231 | * |
6261 | * Currently this code will only succeed if there is a pipe with no encoders |
6232 | * Currently this code will only succeed if there is a pipe with no encoders |
6262 | * configured for it. In the future, it could choose to temporarily disable |
6233 | * configured for it. In the future, it could choose to temporarily disable |
6263 | * some outputs to free up a pipe for its use. |
6234 | * some outputs to free up a pipe for its use. |
6264 | * |
6235 | * |
6265 | * \return crtc, or NULL if no pipes are available. |
6236 | * \return crtc, or NULL if no pipes are available. |
6266 | */ |
6237 | */ |
6267 | 6238 | ||
/* VESA 640x480x72Hz mode to set on the pipe */
/* Used as the fallback mode by intel_get_load_detect_pipe() when the
 * caller does not supply one. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6273 | 6244 | ||
6274 | 6245 | ||
6275 | 6246 | ||
6276 | 6247 | ||
6277 | 6248 | ||
6278 | static u32 |
6249 | static u32 |
6279 | intel_framebuffer_pitch_for_width(int width, int bpp) |
6250 | intel_framebuffer_pitch_for_width(int width, int bpp) |
6280 | { |
6251 | { |
6281 | u32 pitch = DIV_ROUND_UP(width * bpp, 8); |
6252 | u32 pitch = DIV_ROUND_UP(width * bpp, 8); |
6282 | return ALIGN(pitch, 64); |
6253 | return ALIGN(pitch, 64); |
6283 | } |
6254 | } |
6284 | 6255 | ||
6285 | static u32 |
6256 | static u32 |
6286 | intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) |
6257 | intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) |
6287 | { |
6258 | { |
6288 | u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); |
6259 | u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); |
6289 | return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); |
6260 | return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); |
6290 | } |
6261 | } |
6291 | 6262 | ||
/*
 * Allocate a GEM-backed framebuffer big enough for @mode at the given
 * @depth/@bpp.
 *
 * NOTE(review): the GEM allocation path below is commented out (this looks
 * like a port in progress), so the function currently always fails with
 * ERR_PTR(-ENOMEM) and never allocates anything.  Callers must check the
 * result with IS_ERR() before use — intel_get_load_detect_pipe() does.
 * The unused locals will draw compiler warnings until the body is restored.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd;

//   obj = i915_gem_alloc_object(dev,
//                   intel_framebuffer_size_for_mode(mode, bpp));
//   if (obj == NULL)
	return ERR_PTR(-ENOMEM);

//   mode_cmd.width = mode->hdisplay;
//   mode_cmd.height = mode->vdisplay;
//   mode_cmd.depth = depth;
//   mode_cmd.bpp = bpp;
//   mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);

//   return intel_framebuffer_create(dev, &mode_cmd, obj);
}
6313 | 6284 | ||
/*
 * Check whether the fbdev framebuffer is large enough to display @mode,
 * and return it for reuse if so.
 *
 * NOTE(review): the entire check is commented out (port in progress), so
 * the function currently always returns NULL, forcing callers to allocate
 * a temporary framebuffer instead.  The unused locals will draw compiler
 * warnings until the body is restored.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

//   if (dev_priv->fbdev == NULL)
//       return NULL;

//   obj = dev_priv->fbdev->ifb.obj;
//   if (obj == NULL)
//       return NULL;

//   fb = &dev_priv->fbdev->ifb.base;
//   if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
//                             fb->bits_per_pixel))
	return NULL;

//   if (obj->base.size < mode->vdisplay * fb->pitch)
//       return NULL;

//   return fb;
}
6339 | 6310 | ||
/*
 * Grab a CRTC for load-based monitor detection on @connector/@intel_encoder
 * and light it up with @mode (or the 640x480 fallback when @mode is NULL).
 *
 * State needed to undo the operation is recorded in @old for
 * intel_release_load_detect_pipe().  Returns true when a running pipe is
 * available for detection, false if no pipe or framebuffer could be found.
 */
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1;	/* CRTC index, pre-decremented for the list walk below */

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		/* remember the old dpms state so the release path can restore it */
		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* skip CRTCs the encoder cannot be routed to */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	/* temporarily bind the encoder/connector to the borrowed CRTC */
	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	crtc->fb = mode_fits_in_fbdev(dev, mode);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		/* remember it so the release path can destroy it */
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		crtc->fb = old_fb;
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		/* clean up any temporary fb we created before bailing */
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
6455 | 6426 | ||
/*
 * Undo intel_get_load_detect_pipe(): tear down a temporarily borrowed pipe
 * (unbinding the connector and destroying any temporary framebuffer), or —
 * when the connector already owned its CRTC — restore the previous dpms
 * state recorded in @old.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/* pipe was borrowed just for detection: unbind and shut it back down */
	if (old->load_detect_temp) {
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		/* destroy the temporary fb allocated by the acquire path */
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
6486 | 6457 | ||
6487 | /* Returns the clock of the currently programmed mode of the given pipe. */ |
6458 | /* Returns the clock of the currently programmed mode of the given pipe. */ |
6488 | static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) |
6459 | static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) |
6489 | { |
6460 | { |
6490 | struct drm_i915_private *dev_priv = dev->dev_private; |
6461 | struct drm_i915_private *dev_priv = dev->dev_private; |
6491 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6462 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6492 | int pipe = intel_crtc->pipe; |
6463 | int pipe = intel_crtc->pipe; |
6493 | u32 dpll = I915_READ(DPLL(pipe)); |
6464 | u32 dpll = I915_READ(DPLL(pipe)); |
6494 | u32 fp; |
6465 | u32 fp; |
6495 | intel_clock_t clock; |
6466 | intel_clock_t clock; |
6496 | 6467 | ||
6497 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) |
6468 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) |
6498 | fp = I915_READ(FP0(pipe)); |
6469 | fp = I915_READ(FP0(pipe)); |
6499 | else |
6470 | else |
6500 | fp = I915_READ(FP1(pipe)); |
6471 | fp = I915_READ(FP1(pipe)); |
6501 | 6472 | ||
6502 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; |
6473 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; |
6503 | if (IS_PINEVIEW(dev)) { |
6474 | if (IS_PINEVIEW(dev)) { |
6504 | clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; |
6475 | clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; |
6505 | clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; |
6476 | clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; |
6506 | } else { |
6477 | } else { |
6507 | clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; |
6478 | clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; |
6508 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; |
6479 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; |
6509 | } |
6480 | } |
6510 | 6481 | ||
6511 | if (!IS_GEN2(dev)) { |
6482 | if (!IS_GEN2(dev)) { |
6512 | if (IS_PINEVIEW(dev)) |
6483 | if (IS_PINEVIEW(dev)) |
6513 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> |
6484 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> |
6514 | DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); |
6485 | DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); |
6515 | else |
6486 | else |
6516 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> |
6487 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> |
6517 | DPLL_FPA01_P1_POST_DIV_SHIFT); |
6488 | DPLL_FPA01_P1_POST_DIV_SHIFT); |
6518 | 6489 | ||
6519 | switch (dpll & DPLL_MODE_MASK) { |
6490 | switch (dpll & DPLL_MODE_MASK) { |
6520 | case DPLLB_MODE_DAC_SERIAL: |
6491 | case DPLLB_MODE_DAC_SERIAL: |
6521 | clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? |
6492 | clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? |
6522 | 5 : 10; |
6493 | 5 : 10; |
6523 | break; |
6494 | break; |
6524 | case DPLLB_MODE_LVDS: |
6495 | case DPLLB_MODE_LVDS: |
6525 | clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? |
6496 | clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? |
6526 | 7 : 14; |
6497 | 7 : 14; |
6527 | break; |
6498 | break; |
6528 | default: |
6499 | default: |
6529 | DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " |
6500 | DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " |
6530 | "mode\n", (int)(dpll & DPLL_MODE_MASK)); |
6501 | "mode\n", (int)(dpll & DPLL_MODE_MASK)); |
6531 | return 0; |
6502 | return 0; |
6532 | } |
6503 | } |
6533 | 6504 | ||
6534 | /* XXX: Handle the 100Mhz refclk */ |
6505 | /* XXX: Handle the 100Mhz refclk */ |
6535 | intel_clock(dev, 96000, &clock); |
6506 | intel_clock(dev, 96000, &clock); |
6536 | } else { |
6507 | } else { |
6537 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); |
6508 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); |
6538 | 6509 | ||
6539 | if (is_lvds) { |
6510 | if (is_lvds) { |
6540 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> |
6511 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> |
6541 | DPLL_FPA01_P1_POST_DIV_SHIFT); |
6512 | DPLL_FPA01_P1_POST_DIV_SHIFT); |
6542 | clock.p2 = 14; |
6513 | clock.p2 = 14; |
6543 | 6514 | ||
6544 | if ((dpll & PLL_REF_INPUT_MASK) == |
6515 | if ((dpll & PLL_REF_INPUT_MASK) == |
6545 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { |
6516 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { |
6546 | /* XXX: might not be 66MHz */ |
6517 | /* XXX: might not be 66MHz */ |
6547 | intel_clock(dev, 66000, &clock); |
6518 | intel_clock(dev, 66000, &clock); |
6548 | } else |
6519 | } else |
6549 | intel_clock(dev, 48000, &clock); |
6520 | intel_clock(dev, 48000, &clock); |
6550 | } else { |
6521 | } else { |
6551 | if (dpll & PLL_P1_DIVIDE_BY_TWO) |
6522 | if (dpll & PLL_P1_DIVIDE_BY_TWO) |
6552 | clock.p1 = 2; |
6523 | clock.p1 = 2; |
6553 | else { |
6524 | else { |
6554 | clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> |
6525 | clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> |
6555 | DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; |
6526 | DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; |
6556 | } |
6527 | } |
6557 | if (dpll & PLL_P2_DIVIDE_BY_4) |
6528 | if (dpll & PLL_P2_DIVIDE_BY_4) |
6558 | clock.p2 = 4; |
6529 | clock.p2 = 4; |
6559 | else |
6530 | else |
6560 | clock.p2 = 2; |
6531 | clock.p2 = 2; |
6561 | 6532 | ||
6562 | intel_clock(dev, 48000, &clock); |
6533 | intel_clock(dev, 48000, &clock); |
6563 | } |
6534 | } |
6564 | } |
6535 | } |
6565 | 6536 | ||
6566 | /* XXX: It would be nice to validate the clocks, but we can't reuse |
6537 | /* XXX: It would be nice to validate the clocks, but we can't reuse |
6567 | * i830PllIsValid() because it relies on the xf86_config connector |
6538 | * i830PllIsValid() because it relies on the xf86_config connector |
6568 | * configuration being accurate, which it isn't necessarily. |
6539 | * configuration being accurate, which it isn't necessarily. |
6569 | */ |
6540 | */ |
6570 | 6541 | ||
6571 | return clock.dot; |
6542 | return clock.dot; |
6572 | } |
6543 | } |
6573 | 6544 | ||
6574 | /** Returns the currently programmed mode of the given pipe. */ |
6545 | /** Returns the currently programmed mode of the given pipe. */ |
6575 | struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, |
6546 | struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, |
6576 | struct drm_crtc *crtc) |
6547 | struct drm_crtc *crtc) |
6577 | { |
6548 | { |
6578 | struct drm_i915_private *dev_priv = dev->dev_private; |
6549 | struct drm_i915_private *dev_priv = dev->dev_private; |
6579 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6550 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6580 | int pipe = intel_crtc->pipe; |
6551 | int pipe = intel_crtc->pipe; |
6581 | struct drm_display_mode *mode; |
6552 | struct drm_display_mode *mode; |
6582 | int htot = I915_READ(HTOTAL(pipe)); |
6553 | int htot = I915_READ(HTOTAL(pipe)); |
6583 | int hsync = I915_READ(HSYNC(pipe)); |
6554 | int hsync = I915_READ(HSYNC(pipe)); |
6584 | int vtot = I915_READ(VTOTAL(pipe)); |
6555 | int vtot = I915_READ(VTOTAL(pipe)); |
6585 | int vsync = I915_READ(VSYNC(pipe)); |
6556 | int vsync = I915_READ(VSYNC(pipe)); |
6586 | 6557 | ||
6587 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
6558 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
6588 | if (!mode) |
6559 | if (!mode) |
6589 | return NULL; |
6560 | return NULL; |
6590 | 6561 | ||
6591 | mode->clock = intel_crtc_clock_get(dev, crtc); |
6562 | mode->clock = intel_crtc_clock_get(dev, crtc); |
6592 | mode->hdisplay = (htot & 0xffff) + 1; |
6563 | mode->hdisplay = (htot & 0xffff) + 1; |
6593 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; |
6564 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; |
6594 | mode->hsync_start = (hsync & 0xffff) + 1; |
6565 | mode->hsync_start = (hsync & 0xffff) + 1; |
6595 | mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; |
6566 | mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; |
6596 | mode->vdisplay = (vtot & 0xffff) + 1; |
6567 | mode->vdisplay = (vtot & 0xffff) + 1; |
6597 | mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; |
6568 | mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; |
6598 | mode->vsync_start = (vsync & 0xffff) + 1; |
6569 | mode->vsync_start = (vsync & 0xffff) + 1; |
6599 | mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; |
6570 | mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; |
6600 | 6571 | ||
6601 | drm_mode_set_name(mode); |
6572 | drm_mode_set_name(mode); |
6602 | drm_mode_set_crtcinfo(mode, 0); |
6573 | drm_mode_set_crtcinfo(mode, 0); |
6603 | 6574 | ||
6604 | return mode; |
6575 | return mode; |
6605 | } |
6576 | } |
6606 | 6577 | ||
6607 | #define GPU_IDLE_TIMEOUT 500 /* ms */ |
6578 | #define GPU_IDLE_TIMEOUT 500 /* ms */ |
6608 | 6579 | ||
6609 | 6580 | ||
6610 | 6581 | ||
6611 | 6582 | ||
6612 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ |
6583 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ |
6613 | 6584 | ||
6614 | 6585 | ||
6615 | 6586 | ||
6616 | 6587 | ||
6617 | static void intel_increase_pllclock(struct drm_crtc *crtc) |
6588 | static void intel_increase_pllclock(struct drm_crtc *crtc) |
6618 | { |
6589 | { |
6619 | struct drm_device *dev = crtc->dev; |
6590 | struct drm_device *dev = crtc->dev; |
6620 | drm_i915_private_t *dev_priv = dev->dev_private; |
6591 | drm_i915_private_t *dev_priv = dev->dev_private; |
6621 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6592 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6622 | int pipe = intel_crtc->pipe; |
6593 | int pipe = intel_crtc->pipe; |
6623 | int dpll_reg = DPLL(pipe); |
6594 | int dpll_reg = DPLL(pipe); |
6624 | int dpll; |
6595 | int dpll; |
6625 | 6596 | ||
6626 | ENTER(); |
6597 | ENTER(); |
6627 | 6598 | ||
6628 | if (HAS_PCH_SPLIT(dev)) |
6599 | if (HAS_PCH_SPLIT(dev)) |
6629 | return; |
6600 | return; |
6630 | 6601 | ||
6631 | if (!dev_priv->lvds_downclock_avail) |
6602 | if (!dev_priv->lvds_downclock_avail) |
6632 | return; |
6603 | return; |
6633 | 6604 | ||
6634 | dpll = I915_READ(dpll_reg); |
6605 | dpll = I915_READ(dpll_reg); |
6635 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
6606 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
6636 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
6607 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
6637 | 6608 | ||
6638 | /* Unlock panel regs */ |
6609 | /* Unlock panel regs */ |
6639 | I915_WRITE(PP_CONTROL, |
6610 | I915_WRITE(PP_CONTROL, |
6640 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); |
6611 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); |
6641 | 6612 | ||
6642 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
6613 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; |
6643 | I915_WRITE(dpll_reg, dpll); |
6614 | I915_WRITE(dpll_reg, dpll); |
6644 | intel_wait_for_vblank(dev, pipe); |
6615 | intel_wait_for_vblank(dev, pipe); |
6645 | 6616 | ||
6646 | dpll = I915_READ(dpll_reg); |
6617 | dpll = I915_READ(dpll_reg); |
6647 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
6618 | if (dpll & DISPLAY_RATE_SELECT_FPA1) |
6648 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
6619 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
6649 | 6620 | ||
6650 | /* ...and lock them again */ |
6621 | /* ...and lock them again */ |
6651 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); |
6622 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); |
6652 | } |
6623 | } |
6653 | 6624 | ||
6654 | LEAVE(); |
6625 | LEAVE(); |
6655 | 6626 | ||
6656 | /* Schedule downclock */ |
6627 | /* Schedule downclock */ |
6657 | } |
6628 | } |
6658 | 6629 | ||
6659 | 6630 | ||
6660 | 6631 | ||
6661 | 6632 | ||
6662 | 6633 | ||
6663 | 6634 | ||
6664 | 6635 | ||
6665 | 6636 | ||
6666 | 6637 | ||
6667 | 6638 | ||
6668 | 6639 | ||
6669 | 6640 | ||
6670 | 6641 | ||
6671 | 6642 | ||
6672 | 6643 | ||
6673 | 6644 | ||
6674 | 6645 | ||
6675 | 6646 | ||
6676 | 6647 | ||
6677 | 6648 | ||
6678 | 6649 | ||
6679 | 6650 | ||
6680 | static void intel_crtc_destroy(struct drm_crtc *crtc) |
6651 | static void intel_crtc_destroy(struct drm_crtc *crtc) |
6681 | { |
6652 | { |
6682 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6653 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6683 | struct drm_device *dev = crtc->dev; |
6654 | struct drm_device *dev = crtc->dev; |
6684 | struct intel_unpin_work *work; |
6655 | struct intel_unpin_work *work; |
6685 | unsigned long flags; |
6656 | unsigned long flags; |
6686 | 6657 | ||
6687 | spin_lock_irqsave(&dev->event_lock, flags); |
6658 | spin_lock_irqsave(&dev->event_lock, flags); |
6688 | work = intel_crtc->unpin_work; |
6659 | work = intel_crtc->unpin_work; |
6689 | intel_crtc->unpin_work = NULL; |
6660 | intel_crtc->unpin_work = NULL; |
6690 | spin_unlock_irqrestore(&dev->event_lock, flags); |
6661 | spin_unlock_irqrestore(&dev->event_lock, flags); |
6691 | 6662 | ||
6692 | if (work) { |
6663 | if (work) { |
6693 | // cancel_work_sync(&work->work); |
6664 | // cancel_work_sync(&work->work); |
6694 | kfree(work); |
6665 | kfree(work); |
6695 | } |
6666 | } |
6696 | 6667 | ||
6697 | drm_crtc_cleanup(crtc); |
6668 | drm_crtc_cleanup(crtc); |
6698 | 6669 | ||
6699 | kfree(intel_crtc); |
6670 | kfree(intel_crtc); |
6700 | } |
6671 | } |
6701 | 6672 | ||
6702 | 6673 | ||
6703 | 6674 | ||
6704 | 6675 | ||
6705 | 6676 | ||
6706 | 6677 | ||
6707 | 6678 | ||
6708 | 6679 | ||
6709 | 6680 | ||
6710 | 6681 | ||
6711 | 6682 | ||
6712 | 6683 | ||
6713 | 6684 | ||
6714 | 6685 | ||
6715 | 6686 | ||
6716 | 6687 | ||
6717 | 6688 | ||
6718 | 6689 | ||
6719 | 6690 | ||
6720 | 6691 | ||
6721 | 6692 | ||
6722 | 6693 | ||
6723 | 6694 | ||
6724 | 6695 | ||
6725 | 6696 | ||
6726 | 6697 | ||
6727 | 6698 | ||
6728 | 6699 | ||
6729 | 6700 | ||
6730 | 6701 | ||
6731 | 6702 | ||
6732 | 6703 | ||
6733 | 6704 | ||
6734 | 6705 | ||
6735 | 6706 | ||
6736 | 6707 | ||
6737 | 6708 | ||
6738 | 6709 | ||
6739 | 6710 | ||
6740 | 6711 | ||
6741 | 6712 | ||
6742 | 6713 | ||
6743 | 6714 | ||
6744 | 6715 | ||
6745 | 6716 | ||
6746 | 6717 | ||
6747 | 6718 | ||
6748 | 6719 | ||
6749 | 6720 | ||
6750 | 6721 | ||
6751 | 6722 | ||
6752 | 6723 | ||
6753 | 6724 | ||
6754 | 6725 | ||
6755 | 6726 | ||
6756 | 6727 | ||
6757 | 6728 | ||
6758 | 6729 | ||
6759 | 6730 | ||
6760 | 6731 | ||
6761 | 6732 | ||
6762 | 6733 | ||
6763 | 6734 | ||
6764 | 6735 | ||
6765 | 6736 | ||
6766 | 6737 | ||
6767 | static void intel_sanitize_modesetting(struct drm_device *dev, |
6738 | static void intel_sanitize_modesetting(struct drm_device *dev, |
6768 | int pipe, int plane) |
6739 | int pipe, int plane) |
6769 | { |
6740 | { |
6770 | struct drm_i915_private *dev_priv = dev->dev_private; |
6741 | struct drm_i915_private *dev_priv = dev->dev_private; |
6771 | u32 reg, val; |
6742 | u32 reg, val; |
6772 | 6743 | ||
6773 | if (HAS_PCH_SPLIT(dev)) |
6744 | if (HAS_PCH_SPLIT(dev)) |
6774 | return; |
6745 | return; |
6775 | 6746 | ||
6776 | /* Who knows what state these registers were left in by the BIOS or |
6747 | /* Who knows what state these registers were left in by the BIOS or |
6777 | * grub? |
6748 | * grub? |
6778 | * |
6749 | * |
6779 | * If we leave the registers in a conflicting state (e.g. with the |
6750 | * If we leave the registers in a conflicting state (e.g. with the |
6780 | * display plane reading from the other pipe than the one we intend |
6751 | * display plane reading from the other pipe than the one we intend |
6781 | * to use) then when we attempt to teardown the active mode, we will |
6752 | * to use) then when we attempt to teardown the active mode, we will |
6782 | * not disable the pipes and planes in the correct order -- leaving |
6753 | * not disable the pipes and planes in the correct order -- leaving |
6783 | * a plane reading from a disabled pipe and possibly leading to |
6754 | * a plane reading from a disabled pipe and possibly leading to |
6784 | * undefined behaviour. |
6755 | * undefined behaviour. |
6785 | */ |
6756 | */ |
6786 | 6757 | ||
6787 | reg = DSPCNTR(plane); |
6758 | reg = DSPCNTR(plane); |
6788 | val = I915_READ(reg); |
6759 | val = I915_READ(reg); |
6789 | 6760 | ||
6790 | if ((val & DISPLAY_PLANE_ENABLE) == 0) |
6761 | if ((val & DISPLAY_PLANE_ENABLE) == 0) |
6791 | return; |
6762 | return; |
6792 | if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) |
6763 | if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) |
6793 | return; |
6764 | return; |
6794 | 6765 | ||
6795 | /* This display plane is active and attached to the other CPU pipe. */ |
6766 | /* This display plane is active and attached to the other CPU pipe. */ |
6796 | pipe = !pipe; |
6767 | pipe = !pipe; |
6797 | 6768 | ||
6798 | /* Disable the plane and wait for it to stop reading from the pipe. */ |
6769 | /* Disable the plane and wait for it to stop reading from the pipe. */ |
6799 | intel_disable_plane(dev_priv, plane, pipe); |
6770 | intel_disable_plane(dev_priv, plane, pipe); |
6800 | intel_disable_pipe(dev_priv, pipe); |
6771 | intel_disable_pipe(dev_priv, pipe); |
6801 | } |
6772 | } |
6802 | 6773 | ||
6803 | static void intel_crtc_reset(struct drm_crtc *crtc) |
6774 | static void intel_crtc_reset(struct drm_crtc *crtc) |
6804 | { |
6775 | { |
6805 | struct drm_device *dev = crtc->dev; |
6776 | struct drm_device *dev = crtc->dev; |
6806 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6777 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6807 | 6778 | ||
6808 | /* Reset flags back to the 'unknown' status so that they |
6779 | /* Reset flags back to the 'unknown' status so that they |
6809 | * will be correctly set on the initial modeset. |
6780 | * will be correctly set on the initial modeset. |
6810 | */ |
6781 | */ |
6811 | intel_crtc->dpms_mode = -1; |
6782 | intel_crtc->dpms_mode = -1; |
6812 | 6783 | ||
6813 | /* We need to fix up any BIOS configuration that conflicts with |
6784 | /* We need to fix up any BIOS configuration that conflicts with |
6814 | * our expectations. |
6785 | * our expectations. |
6815 | */ |
6786 | */ |
6816 | intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); |
6787 | intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); |
6817 | } |
6788 | } |
6818 | 6789 | ||
6819 | static struct drm_crtc_helper_funcs intel_helper_funcs = { |
6790 | static struct drm_crtc_helper_funcs intel_helper_funcs = { |
6820 | .dpms = intel_crtc_dpms, |
6791 | .dpms = intel_crtc_dpms, |
6821 | .mode_fixup = intel_crtc_mode_fixup, |
6792 | .mode_fixup = intel_crtc_mode_fixup, |
6822 | .mode_set = intel_crtc_mode_set, |
6793 | .mode_set = intel_crtc_mode_set, |
6823 | .mode_set_base = intel_pipe_set_base, |
6794 | .mode_set_base = intel_pipe_set_base, |
6824 | .mode_set_base_atomic = intel_pipe_set_base_atomic, |
6795 | .mode_set_base_atomic = intel_pipe_set_base_atomic, |
6825 | .load_lut = intel_crtc_load_lut, |
6796 | .load_lut = intel_crtc_load_lut, |
6826 | .disable = intel_crtc_disable, |
6797 | .disable = intel_crtc_disable, |
6827 | }; |
6798 | }; |
6828 | 6799 | ||
6829 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
6800 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
6830 | .reset = intel_crtc_reset, |
6801 | .reset = intel_crtc_reset, |
6831 | // .cursor_set = intel_crtc_cursor_set, |
6802 | // .cursor_set = intel_crtc_cursor_set, |
6832 | // .cursor_move = intel_crtc_cursor_move, |
6803 | // .cursor_move = intel_crtc_cursor_move, |
6833 | .gamma_set = intel_crtc_gamma_set, |
6804 | .gamma_set = intel_crtc_gamma_set, |
6834 | .set_config = drm_crtc_helper_set_config, |
6805 | .set_config = drm_crtc_helper_set_config, |
6835 | .destroy = intel_crtc_destroy, |
6806 | .destroy = intel_crtc_destroy, |
6836 | // .page_flip = intel_crtc_page_flip, |
6807 | // .page_flip = intel_crtc_page_flip, |
6837 | }; |
6808 | }; |
6838 | 6809 | ||
6839 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
6810 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
6840 | { |
6811 | { |
6841 | drm_i915_private_t *dev_priv = dev->dev_private; |
6812 | drm_i915_private_t *dev_priv = dev->dev_private; |
6842 | struct intel_crtc *intel_crtc; |
6813 | struct intel_crtc *intel_crtc; |
6843 | int i; |
6814 | int i; |
6844 | 6815 | ||
6845 | intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); |
6816 | intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); |
6846 | if (intel_crtc == NULL) |
6817 | if (intel_crtc == NULL) |
6847 | return; |
6818 | return; |
6848 | 6819 | ||
6849 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); |
6820 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); |
6850 | 6821 | ||
6851 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); |
6822 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); |
6852 | for (i = 0; i < 256; i++) { |
6823 | for (i = 0; i < 256; i++) { |
6853 | intel_crtc->lut_r[i] = i; |
6824 | intel_crtc->lut_r[i] = i; |
6854 | intel_crtc->lut_g[i] = i; |
6825 | intel_crtc->lut_g[i] = i; |
6855 | intel_crtc->lut_b[i] = i; |
6826 | intel_crtc->lut_b[i] = i; |
6856 | } |
6827 | } |
6857 | 6828 | ||
6858 | /* Swap pipes & planes for FBC on pre-965 */ |
6829 | /* Swap pipes & planes for FBC on pre-965 */ |
6859 | intel_crtc->pipe = pipe; |
6830 | intel_crtc->pipe = pipe; |
6860 | intel_crtc->plane = pipe; |
6831 | intel_crtc->plane = pipe; |
6861 | if (IS_MOBILE(dev) && IS_GEN3(dev)) { |
6832 | if (IS_MOBILE(dev) && IS_GEN3(dev)) { |
6862 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); |
6833 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); |
6863 | intel_crtc->plane = !pipe; |
6834 | intel_crtc->plane = !pipe; |
6864 | } |
6835 | } |
6865 | 6836 | ||
6866 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || |
6837 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || |
6867 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); |
6838 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); |
6868 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; |
6839 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; |
6869 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; |
6840 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; |
6870 | 6841 | ||
6871 | intel_crtc_reset(&intel_crtc->base); |
6842 | intel_crtc_reset(&intel_crtc->base); |
6872 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ |
6843 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ |
6873 | intel_crtc->bpp = 24; /* default for pre-Ironlake */ |
6844 | intel_crtc->bpp = 24; /* default for pre-Ironlake */ |
6874 | 6845 | ||
6875 | if (HAS_PCH_SPLIT(dev)) { |
6846 | if (HAS_PCH_SPLIT(dev)) { |
6876 | if (pipe == 2 && IS_IVYBRIDGE(dev)) |
6847 | if (pipe == 2 && IS_IVYBRIDGE(dev)) |
6877 | intel_crtc->no_pll = true; |
6848 | intel_crtc->no_pll = true; |
6878 | intel_helper_funcs.prepare = ironlake_crtc_prepare; |
6849 | intel_helper_funcs.prepare = ironlake_crtc_prepare; |
6879 | intel_helper_funcs.commit = ironlake_crtc_commit; |
6850 | intel_helper_funcs.commit = ironlake_crtc_commit; |
6880 | } else { |
6851 | } else { |
6881 | intel_helper_funcs.prepare = i9xx_crtc_prepare; |
6852 | intel_helper_funcs.prepare = i9xx_crtc_prepare; |
6882 | intel_helper_funcs.commit = i9xx_crtc_commit; |
6853 | intel_helper_funcs.commit = i9xx_crtc_commit; |
6883 | } |
6854 | } |
6884 | 6855 | ||
6885 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
6856 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
6886 | 6857 | ||
6887 | intel_crtc->busy = false; |
6858 | intel_crtc->busy = false; |
6888 | 6859 | ||
6889 | } |
6860 | } |
6890 | 6861 | ||
6891 | 6862 | ||
6892 | 6863 | ||
6893 | 6864 | ||
6894 | 6865 | ||
6895 | 6866 | ||
6896 | 6867 | ||
6897 | static int intel_encoder_clones(struct drm_device *dev, int type_mask) |
6868 | static int intel_encoder_clones(struct drm_device *dev, int type_mask) |
6898 | { |
6869 | { |
6899 | struct intel_encoder *encoder; |
6870 | struct intel_encoder *encoder; |
6900 | int index_mask = 0; |
6871 | int index_mask = 0; |
6901 | int entry = 0; |
6872 | int entry = 0; |
6902 | 6873 | ||
6903 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
6874 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
6904 | if (type_mask & encoder->clone_mask) |
6875 | if (type_mask & encoder->clone_mask) |
6905 | index_mask |= (1 << entry); |
6876 | index_mask |= (1 << entry); |
6906 | entry++; |
6877 | entry++; |
6907 | } |
6878 | } |
6908 | 6879 | ||
6909 | return index_mask; |
6880 | return index_mask; |
6910 | } |
6881 | } |
6911 | 6882 | ||
6912 | static bool has_edp_a(struct drm_device *dev) |
6883 | static bool has_edp_a(struct drm_device *dev) |
6913 | { |
6884 | { |
6914 | struct drm_i915_private *dev_priv = dev->dev_private; |
6885 | struct drm_i915_private *dev_priv = dev->dev_private; |
6915 | 6886 | ||
6916 | if (!IS_MOBILE(dev)) |
6887 | if (!IS_MOBILE(dev)) |
6917 | return false; |
6888 | return false; |
6918 | 6889 | ||
6919 | if ((I915_READ(DP_A) & DP_DETECTED) == 0) |
6890 | if ((I915_READ(DP_A) & DP_DETECTED) == 0) |
6920 | return false; |
6891 | return false; |
6921 | 6892 | ||
6922 | if (IS_GEN5(dev) && |
6893 | if (IS_GEN5(dev) && |
6923 | (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) |
6894 | (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) |
6924 | return false; |
6895 | return false; |
6925 | 6896 | ||
6926 | return true; |
6897 | return true; |
6927 | } |
6898 | } |
6928 | 6899 | ||
6929 | static void intel_setup_outputs(struct drm_device *dev) |
6900 | static void intel_setup_outputs(struct drm_device *dev) |
6930 | { |
6901 | { |
6931 | struct drm_i915_private *dev_priv = dev->dev_private; |
6902 | struct drm_i915_private *dev_priv = dev->dev_private; |
6932 | struct intel_encoder *encoder; |
6903 | struct intel_encoder *encoder; |
6933 | bool dpd_is_edp = false; |
6904 | bool dpd_is_edp = false; |
6934 | bool has_lvds = false; |
6905 | bool has_lvds = false; |
6935 | 6906 | ||
6936 | ENTER(); |
6907 | ENTER(); |
6937 | 6908 | ||
6938 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
6909 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
6939 | has_lvds = intel_lvds_init(dev); |
6910 | has_lvds = intel_lvds_init(dev); |
6940 | if (!has_lvds && !HAS_PCH_SPLIT(dev)) { |
6911 | if (!has_lvds && !HAS_PCH_SPLIT(dev)) { |
6941 | /* disable the panel fitter on everything but LVDS */ |
6912 | /* disable the panel fitter on everything but LVDS */ |
6942 | I915_WRITE(PFIT_CONTROL, 0); |
6913 | I915_WRITE(PFIT_CONTROL, 0); |
6943 | } |
6914 | } |
6944 | 6915 | ||
6945 | if (HAS_PCH_SPLIT(dev)) { |
6916 | if (HAS_PCH_SPLIT(dev)) { |
6946 | dpd_is_edp = intel_dpd_is_edp(dev); |
6917 | dpd_is_edp = intel_dpd_is_edp(dev); |
6947 | 6918 | ||
6948 | if (has_edp_a(dev)) |
6919 | if (has_edp_a(dev)) |
6949 | intel_dp_init(dev, DP_A); |
6920 | intel_dp_init(dev, DP_A); |
6950 | 6921 | ||
6951 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
6922 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
6952 | intel_dp_init(dev, PCH_DP_D); |
6923 | intel_dp_init(dev, PCH_DP_D); |
6953 | } |
6924 | } |
6954 | 6925 | ||
6955 | intel_crt_init(dev); |
6926 | intel_crt_init(dev); |
6956 | 6927 | ||
6957 | if (HAS_PCH_SPLIT(dev)) { |
6928 | if (HAS_PCH_SPLIT(dev)) { |
6958 | int found; |
6929 | int found; |
6959 | 6930 | ||
6960 | if (I915_READ(HDMIB) & PORT_DETECTED) { |
6931 | if (I915_READ(HDMIB) & PORT_DETECTED) { |
6961 | /* PCH SDVOB multiplex with HDMIB */ |
6932 | /* PCH SDVOB multiplex with HDMIB */ |
6962 | found = intel_sdvo_init(dev, PCH_SDVOB); |
6933 | found = intel_sdvo_init(dev, PCH_SDVOB); |
6963 | if (!found) |
6934 | if (!found) |
6964 | intel_hdmi_init(dev, HDMIB); |
6935 | intel_hdmi_init(dev, HDMIB); |
6965 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) |
6936 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) |
6966 | intel_dp_init(dev, PCH_DP_B); |
6937 | intel_dp_init(dev, PCH_DP_B); |
6967 | } |
6938 | } |
6968 | 6939 | ||
6969 | if (I915_READ(HDMIC) & PORT_DETECTED) |
6940 | if (I915_READ(HDMIC) & PORT_DETECTED) |
6970 | intel_hdmi_init(dev, HDMIC); |
6941 | intel_hdmi_init(dev, HDMIC); |
6971 | 6942 | ||
6972 | if (I915_READ(HDMID) & PORT_DETECTED) |
6943 | if (I915_READ(HDMID) & PORT_DETECTED) |
6973 | intel_hdmi_init(dev, HDMID); |
6944 | intel_hdmi_init(dev, HDMID); |
6974 | 6945 | ||
6975 | if (I915_READ(PCH_DP_C) & DP_DETECTED) |
6946 | if (I915_READ(PCH_DP_C) & DP_DETECTED) |
6976 | intel_dp_init(dev, PCH_DP_C); |
6947 | intel_dp_init(dev, PCH_DP_C); |
6977 | 6948 | ||
6978 | if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
6949 | if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
6979 | intel_dp_init(dev, PCH_DP_D); |
6950 | intel_dp_init(dev, PCH_DP_D); |
6980 | 6951 | ||
6981 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
6952 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
6982 | bool found = false; |
6953 | bool found = false; |
6983 | 6954 | ||
6984 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
6955 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
6985 | DRM_DEBUG_KMS("probing SDVOB\n"); |
6956 | DRM_DEBUG_KMS("probing SDVOB\n"); |
6986 | found = intel_sdvo_init(dev, SDVOB); |
6957 | found = intel_sdvo_init(dev, SDVOB); |
6987 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { |
6958 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { |
6988 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); |
6959 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); |
6989 | intel_hdmi_init(dev, SDVOB); |
6960 | intel_hdmi_init(dev, SDVOB); |
6990 | } |
6961 | } |
6991 | 6962 | ||
6992 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) { |
6963 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) { |
6993 | DRM_DEBUG_KMS("probing DP_B\n"); |
6964 | DRM_DEBUG_KMS("probing DP_B\n"); |
6994 | intel_dp_init(dev, DP_B); |
6965 | intel_dp_init(dev, DP_B); |
6995 | } |
6966 | } |
6996 | } |
6967 | } |
6997 | 6968 | ||
6998 | /* Before G4X SDVOC doesn't have its own detect register */ |
6969 | /* Before G4X SDVOC doesn't have its own detect register */ |
6999 | 6970 | ||
7000 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
6971 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
7001 | DRM_DEBUG_KMS("probing SDVOC\n"); |
6972 | DRM_DEBUG_KMS("probing SDVOC\n"); |
7002 | found = intel_sdvo_init(dev, SDVOC); |
6973 | found = intel_sdvo_init(dev, SDVOC); |
7003 | } |
6974 | } |
7004 | 6975 | ||
7005 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { |
6976 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { |
7006 | 6977 | ||
7007 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { |
6978 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { |
7008 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); |
6979 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); |
7009 | intel_hdmi_init(dev, SDVOC); |
6980 | intel_hdmi_init(dev, SDVOC); |
7010 | } |
6981 | } |
7011 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
6982 | if (SUPPORTS_INTEGRATED_DP(dev)) { |
7012 | DRM_DEBUG_KMS("probing DP_C\n"); |
6983 | DRM_DEBUG_KMS("probing DP_C\n"); |
7013 | intel_dp_init(dev, DP_C); |
6984 | intel_dp_init(dev, DP_C); |
7014 | } |
6985 | } |
7015 | } |
6986 | } |
7016 | 6987 | ||
7017 | if (SUPPORTS_INTEGRATED_DP(dev) && |
6988 | if (SUPPORTS_INTEGRATED_DP(dev) && |
7018 | (I915_READ(DP_D) & DP_DETECTED)) { |
6989 | (I915_READ(DP_D) & DP_DETECTED)) { |
7019 | DRM_DEBUG_KMS("probing DP_D\n"); |
6990 | DRM_DEBUG_KMS("probing DP_D\n"); |
7020 | intel_dp_init(dev, DP_D); |
6991 | intel_dp_init(dev, DP_D); |
7021 | } |
6992 | } |
7022 | } else if (IS_GEN2(dev)) |
6993 | } else if (IS_GEN2(dev)) |
7023 | intel_dvo_init(dev); |
6994 | intel_dvo_init(dev); |
7024 | 6995 | ||
7025 | // if (SUPPORTS_TV(dev)) |
6996 | // if (SUPPORTS_TV(dev)) |
7026 | // intel_tv_init(dev); |
6997 | // intel_tv_init(dev); |
7027 | 6998 | ||
7028 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
6999 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
7029 | encoder->base.possible_crtcs = encoder->crtc_mask; |
7000 | encoder->base.possible_crtcs = encoder->crtc_mask; |
7030 | encoder->base.possible_clones = |
7001 | encoder->base.possible_clones = |
7031 | intel_encoder_clones(dev, encoder->clone_mask); |
7002 | intel_encoder_clones(dev, encoder->clone_mask); |
7032 | } |
7003 | } |
7033 | 7004 | ||
7034 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
7005 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
7035 | // drm_helper_disable_unused_functions(dev); |
7006 | // drm_helper_disable_unused_functions(dev); |
7036 | 7007 | ||
7037 | if (HAS_PCH_SPLIT(dev)) |
7008 | if (HAS_PCH_SPLIT(dev)) |
7038 | ironlake_init_pch_refclk(dev); |
7009 | ironlake_init_pch_refclk(dev); |
7039 | 7010 | ||
7040 | LEAVE(); |
7011 | LEAVE(); |
7041 | } |
7012 | } |
7042 | 7013 | ||
7043 | 7014 | ||
7044 | 7015 | ||
7045 | - | ||
7046 | static const struct drm_mode_config_funcs intel_mode_funcs = { |
- | |
7047 | .fb_create = NULL /*intel_user_framebuffer_create*/, |
- | |
7048 | .output_poll_changed = NULL /*intel_fb_output_poll_changed*/, |
- | |
7049 | }; |
7016 | |
7050 | 7017 | ||
7051 | 7018 | ||
7052 | 7019 | ||
7053 | 7020 | ||
7054 | static const struct drm_framebuffer_funcs intel_fb_funcs = { |
7021 | static const struct drm_framebuffer_funcs intel_fb_funcs = { |
7055 | // .destroy = intel_user_framebuffer_destroy, |
7022 | // .destroy = intel_user_framebuffer_destroy, |
7056 | // .create_handle = intel_user_framebuffer_create_handle, |
7023 | // .create_handle = intel_user_framebuffer_create_handle, |
7057 | }; |
7024 | }; |
7058 | 7025 | ||
7059 | int intel_framebuffer_init(struct drm_device *dev, |
7026 | int intel_framebuffer_init(struct drm_device *dev, |
7060 | struct intel_framebuffer *intel_fb, |
7027 | struct intel_framebuffer *intel_fb, |
7061 | struct drm_mode_fb_cmd2 *mode_cmd, |
7028 | struct drm_mode_fb_cmd2 *mode_cmd, |
7062 | struct drm_i915_gem_object *obj) |
7029 | struct drm_i915_gem_object *obj) |
7063 | { |
7030 | { |
7064 | int ret; |
7031 | int ret; |
7065 | 7032 | ||
7066 | if (obj->tiling_mode == I915_TILING_Y) |
7033 | if (obj->tiling_mode == I915_TILING_Y) |
7067 | return -EINVAL; |
7034 | return -EINVAL; |
7068 | 7035 | ||
7069 | if (mode_cmd->pitches[0] & 63) |
7036 | if (mode_cmd->pitches[0] & 63) |
7070 | return -EINVAL; |
7037 | return -EINVAL; |
7071 | 7038 | ||
7072 | switch (mode_cmd->pixel_format) { |
7039 | switch (mode_cmd->pixel_format) { |
7073 | case DRM_FORMAT_RGB332: |
7040 | case DRM_FORMAT_RGB332: |
7074 | case DRM_FORMAT_RGB565: |
7041 | case DRM_FORMAT_RGB565: |
7075 | case DRM_FORMAT_XRGB8888: |
7042 | case DRM_FORMAT_XRGB8888: |
7076 | case DRM_FORMAT_ARGB8888: |
7043 | case DRM_FORMAT_ARGB8888: |
7077 | case DRM_FORMAT_XRGB2101010: |
7044 | case DRM_FORMAT_XRGB2101010: |
7078 | case DRM_FORMAT_ARGB2101010: |
7045 | case DRM_FORMAT_ARGB2101010: |
7079 | /* RGB formats are common across chipsets */ |
7046 | /* RGB formats are common across chipsets */ |
7080 | break; |
7047 | break; |
7081 | case DRM_FORMAT_YUYV: |
7048 | case DRM_FORMAT_YUYV: |
7082 | case DRM_FORMAT_UYVY: |
7049 | case DRM_FORMAT_UYVY: |
7083 | case DRM_FORMAT_YVYU: |
7050 | case DRM_FORMAT_YVYU: |
7084 | case DRM_FORMAT_VYUY: |
7051 | case DRM_FORMAT_VYUY: |
7085 | break; |
7052 | break; |
7086 | default: |
7053 | default: |
7087 | DRM_ERROR("unsupported pixel format\n"); |
7054 | DRM_ERROR("unsupported pixel format\n"); |
7088 | return -EINVAL; |
7055 | return -EINVAL; |
7089 | } |
7056 | } |
7090 | 7057 | ||
7091 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
7058 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
7092 | if (ret) { |
7059 | if (ret) { |
7093 | DRM_ERROR("framebuffer init failed %d\n", ret); |
7060 | DRM_ERROR("framebuffer init failed %d\n", ret); |
7094 | return ret; |
7061 | return ret; |
7095 | } |
7062 | } |
7096 | 7063 | ||
7097 | drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); |
7064 | drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); |
7098 | intel_fb->obj = obj; |
7065 | intel_fb->obj = obj; |
7099 | return 0; |
7066 | return 0; |
7100 | } |
7067 | } |
7101 | 7068 | ||
- | 7069 | ||
/*
 * Mode-config callbacks for this port.  Userspace framebuffer creation
 * and fbdev output polling are stubbed out (NULL); the original hook
 * names are kept in comments for reference.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = NULL /*intel_user_framebuffer_create*/,
	.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
};
|
7104 | 7074 | ||
7105 | 7075 | ||
7106 | 7076 | ||
7107 | 7077 | ||
7108 | 7078 | ||
7109 | 7079 | ||
7110 | 7080 | ||
7111 | 7081 | ||
7112 | 7082 | ||
/*
 * Request a new render P-state (DRPS) frequency on Ironlake.
 *
 * @dev: drm device
 * @val: frequency code to request (index into the PXVFREQ table)
 *
 * Returns false if the memory controller is still busy with a previous
 * command (MEMCTL_CMD_STS set), true once the CHFREQ command has been
 * handed to the hardware.
 */
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	/* Build the change-frequency command word */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	/* Setting the STS bit submits the command to the hardware */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
7134 | 7104 | ||
/*
 * Enable DRPS (dynamic render P-state switching) on Ironlake.
 *
 * Programs the RC evaluation intervals and averaging thresholds, decodes
 * the fmin/fmax/fstart frequency codes out of MEMMODECTL, switches the
 * memory controller into software frequency-control mode and requests the
 * start frequency via ironlake_set_drps().  Finally snapshots the energy
 * counters used later for power accounting deltas.
 */
void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	/* Enable temp reporting */
	/* NOTE(review): 32-bit reads paired with 16-bit writes here — the
	 * registers appear to be 16 bits wide, but confirm the access widths
	 * against the register spec. */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	/* Voltage code for the start frequency, from the PXVFREQ table */
	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->fmax = fmax; /* IPS callback will increase this */
	dev_priv->fstart = fstart;

	dev_priv->max_delay = fstart;
	dev_priv->min_delay = fmin;
	dev_priv->cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	/* Hand frequency control over to software */
	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	msleep(1);

	ironlake_set_drps(dev, fstart);

	/* Snapshot the energy/count registers for later delta computation.
	 * The timestamp captures are disabled in this port (commented out). */
	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
//	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->last_count2 = I915_READ(0x112f4);
//	getrawmonotonic(&dev_priv->last_time2);
}
7198 | 7168 | ||
7199 | 7169 | ||
7200 | 7170 | ||
7201 | 7171 | ||
7202 | 7172 | ||
7203 | 7173 | ||
7204 | 7174 | ||
7205 | 7175 | ||
7206 | 7176 | ||
7207 | 7177 | ||
7208 | 7178 | ||
7209 | 7179 | ||
/*
 * Decode a PXVFREQ register value into a frequency (in kHz-scaled units).
 *
 * The register packs a feedback divider (bits 21:16), a post divider
 * exponent (bits 13:12) and a pre divider (bits 2:0).  A pre divider of
 * zero marks an unprogrammed table entry and yields 0.
 */
static unsigned long intel_pxfreq(u32 vidfreq)
{
	int divider = (vidfreq >> 16) & 0x3f;
	int post_div = (vidfreq >> 12) & 0x3;
	int pre_div = vidfreq & 0x7;

	if (pre_div == 0)
		return 0;	/* unprogrammed entry */

	return (divider * 133333) / ((1 << post_div) * pre_div);
}
7224 | 7194 | ||
/*
 * Initialize the EMON (energy monitoring) hardware on Ironlake.
 *
 * Programs the per-event energy weights and the per-P-state power weights
 * (derived from the PXVFREQ table), then enables PMON with the standard
 * event selection.  The fused correction factor is cached in
 * dev_priv->corr for the power-estimation code.
 */
void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		/* Weight ~ V^2 * f, scaled to fit in one byte */
		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte weights into four 32-bit PXW registers */
	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
7264 | } |
7295 | 7265 | ||
7296 | static bool intel_enable_rc6(struct drm_device *dev) |
7266 | static bool intel_enable_rc6(struct drm_device *dev) |
7297 | { |
7267 | { |
7298 | /* |
7268 | /* |
7299 | * Respect the kernel parameter if it is set |
7269 | * Respect the kernel parameter if it is set |
7300 | */ |
7270 | */ |
7301 | if (i915_enable_rc6 >= 0) |
7271 | if (i915_enable_rc6 >= 0) |
7302 | return i915_enable_rc6; |
7272 | return i915_enable_rc6; |
7303 | 7273 | ||
7304 | /* |
7274 | /* |
7305 | * Disable RC6 on Ironlake |
7275 | * Disable RC6 on Ironlake |
7306 | */ |
7276 | */ |
7307 | if (INTEL_INFO(dev)->gen == 5) |
7277 | if (INTEL_INFO(dev)->gen == 5) |
7308 | return 0; |
7278 | return 0; |
7309 | 7279 | ||
7310 | /* |
7280 | /* |
7311 | * Disable rc6 on Sandybridge |
7281 | * Disable rc6 on Sandybridge |
7312 | */ |
7282 | */ |
7313 | if (INTEL_INFO(dev)->gen == 6) { |
7283 | if (INTEL_INFO(dev)->gen == 6) { |
7314 | DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n"); |
7284 | DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n"); |
7315 | return 0; |
7285 | return 0; |
7316 | } |
7286 | } |
7317 | DRM_DEBUG_DRIVER("RC6 enabled\n"); |
7287 | DRM_DEBUG_DRIVER("RC6 enabled\n"); |
7318 | return 1; |
7288 | return 1; |
7319 | } |
7289 | } |
7320 | 7290 | ||
7321 | void gen6_enable_rps(struct drm_i915_private *dev_priv) |
7291 | void gen6_enable_rps(struct drm_i915_private *dev_priv) |
7322 | { |
7292 | { |
7323 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
7293 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
7324 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); |
7294 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); |
7325 | u32 pcu_mbox, rc6_mask = 0; |
7295 | u32 pcu_mbox, rc6_mask = 0; |
7326 | int cur_freq, min_freq, max_freq; |
7296 | int cur_freq, min_freq, max_freq; |
7327 | int i; |
7297 | int i; |
7328 | 7298 | ||
7329 | /* Here begins a magic sequence of register writes to enable |
7299 | /* Here begins a magic sequence of register writes to enable |
7330 | * auto-downclocking. |
7300 | * auto-downclocking. |
7331 | * |
7301 | * |
7332 | * Perhaps there might be some value in exposing these to |
7302 | * Perhaps there might be some value in exposing these to |
7333 | * userspace... |
7303 | * userspace... |
7334 | */ |
7304 | */ |
7335 | I915_WRITE(GEN6_RC_STATE, 0); |
7305 | I915_WRITE(GEN6_RC_STATE, 0); |
7336 | mutex_lock(&dev_priv->dev->struct_mutex); |
7306 | mutex_lock(&dev_priv->dev->struct_mutex); |
7337 | gen6_gt_force_wake_get(dev_priv); |
7307 | gen6_gt_force_wake_get(dev_priv); |
7338 | 7308 | ||
7339 | /* disable the counters and set deterministic thresholds */ |
7309 | /* disable the counters and set deterministic thresholds */ |
7340 | I915_WRITE(GEN6_RC_CONTROL, 0); |
7310 | I915_WRITE(GEN6_RC_CONTROL, 0); |
7341 | 7311 | ||
7342 | I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); |
7312 | I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); |
7343 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); |
7313 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); |
7344 | I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); |
7314 | I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); |
7345 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); |
7315 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); |
7346 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); |
7316 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); |
7347 | 7317 | ||
7348 | for (i = 0; i < I915_NUM_RINGS; i++) |
7318 | for (i = 0; i < I915_NUM_RINGS; i++) |
7349 | I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); |
7319 | I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); |
7350 | 7320 | ||
7351 | I915_WRITE(GEN6_RC_SLEEP, 0); |
7321 | I915_WRITE(GEN6_RC_SLEEP, 0); |
7352 | I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); |
7322 | I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); |
7353 | I915_WRITE(GEN6_RC6_THRESHOLD, 50000); |
7323 | I915_WRITE(GEN6_RC6_THRESHOLD, 50000); |
7354 | I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); |
7324 | I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); |
7355 | I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ |
7325 | I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ |
7356 | 7326 | ||
7357 | if (intel_enable_rc6(dev_priv->dev)) |
7327 | if (intel_enable_rc6(dev_priv->dev)) |
7358 | rc6_mask = GEN6_RC_CTL_RC6p_ENABLE | |
7328 | rc6_mask = GEN6_RC_CTL_RC6p_ENABLE | |
7359 | GEN6_RC_CTL_RC6_ENABLE; |
7329 | GEN6_RC_CTL_RC6_ENABLE; |
7360 | 7330 | ||
7361 | I915_WRITE(GEN6_RC_CONTROL, |
7331 | I915_WRITE(GEN6_RC_CONTROL, |
7362 | rc6_mask | |
7332 | rc6_mask | |
7363 | GEN6_RC_CTL_EI_MODE(1) | |
7333 | GEN6_RC_CTL_EI_MODE(1) | |
7364 | GEN6_RC_CTL_HW_ENABLE); |
7334 | GEN6_RC_CTL_HW_ENABLE); |
7365 | 7335 | ||
7366 | I915_WRITE(GEN6_RPNSWREQ, |
7336 | I915_WRITE(GEN6_RPNSWREQ, |
7367 | GEN6_FREQUENCY(10) | |
7337 | GEN6_FREQUENCY(10) | |
7368 | GEN6_OFFSET(0) | |
7338 | GEN6_OFFSET(0) | |
7369 | GEN6_AGGRESSIVE_TURBO); |
7339 | GEN6_AGGRESSIVE_TURBO); |
7370 | I915_WRITE(GEN6_RC_VIDEO_FREQ, |
7340 | I915_WRITE(GEN6_RC_VIDEO_FREQ, |
7371 | GEN6_FREQUENCY(12)); |
7341 | GEN6_FREQUENCY(12)); |
7372 | 7342 | ||
7373 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); |
7343 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); |
7374 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, |
7344 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, |
7375 | 18 << 24 | |
7345 | 18 << 24 | |
7376 | 6 << 16); |
7346 | 6 << 16); |
7377 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); |
7347 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); |
7378 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); |
7348 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); |
7379 | I915_WRITE(GEN6_RP_UP_EI, 100000); |
7349 | I915_WRITE(GEN6_RP_UP_EI, 100000); |
7380 | I915_WRITE(GEN6_RP_DOWN_EI, 5000000); |
7350 | I915_WRITE(GEN6_RP_DOWN_EI, 5000000); |
7381 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); |
7351 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); |
7382 | I915_WRITE(GEN6_RP_CONTROL, |
7352 | I915_WRITE(GEN6_RP_CONTROL, |
7383 | GEN6_RP_MEDIA_TURBO | |
7353 | GEN6_RP_MEDIA_TURBO | |
7384 | GEN6_RP_MEDIA_HW_MODE | |
7354 | GEN6_RP_MEDIA_HW_MODE | |
7385 | GEN6_RP_MEDIA_IS_GFX | |
7355 | GEN6_RP_MEDIA_IS_GFX | |
7386 | GEN6_RP_ENABLE | |
7356 | GEN6_RP_ENABLE | |
7387 | GEN6_RP_UP_BUSY_AVG | |
7357 | GEN6_RP_UP_BUSY_AVG | |
7388 | GEN6_RP_DOWN_IDLE_CONT); |
7358 | GEN6_RP_DOWN_IDLE_CONT); |
7389 | 7359 | ||
7390 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
7360 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
7391 | 500)) |
7361 | 500)) |
7392 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); |
7362 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); |
7393 | 7363 | ||
7394 | I915_WRITE(GEN6_PCODE_DATA, 0); |
7364 | I915_WRITE(GEN6_PCODE_DATA, 0); |
7395 | I915_WRITE(GEN6_PCODE_MAILBOX, |
7365 | I915_WRITE(GEN6_PCODE_MAILBOX, |
7396 | GEN6_PCODE_READY | |
7366 | GEN6_PCODE_READY | |
7397 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); |
7367 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); |
7398 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
7368 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
7399 | 500)) |
7369 | 500)) |
7400 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); |
7370 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); |
7401 | 7371 | ||
7402 | min_freq = (rp_state_cap & 0xff0000) >> 16; |
7372 | min_freq = (rp_state_cap & 0xff0000) >> 16; |
7403 | max_freq = rp_state_cap & 0xff; |
7373 | max_freq = rp_state_cap & 0xff; |
7404 | cur_freq = (gt_perf_status & 0xff00) >> 8; |
7374 | cur_freq = (gt_perf_status & 0xff00) >> 8; |
7405 | 7375 | ||
7406 | /* Check for overclock support */ |
7376 | /* Check for overclock support */ |
7407 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
7377 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
7408 | 500)) |
7378 | 500)) |
7409 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); |
7379 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); |
7410 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); |
7380 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); |
7411 | pcu_mbox = I915_READ(GEN6_PCODE_DATA); |
7381 | pcu_mbox = I915_READ(GEN6_PCODE_DATA); |
7412 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
7382 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, |
7413 | 500)) |
7383 | 500)) |
7414 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); |
7384 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); |
7415 | if (pcu_mbox & (1<<31)) { /* OC supported */ |
7385 | if (pcu_mbox & (1<<31)) { /* OC supported */ |
7416 | max_freq = pcu_mbox & 0xff; |
7386 | max_freq = pcu_mbox & 0xff; |
7417 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); |
7387 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); |
7418 | } |
7388 | } |
7419 | 7389 | ||
7420 | /* In units of 100MHz */ |
7390 | /* In units of 100MHz */ |
7421 | dev_priv->max_delay = max_freq; |
7391 | dev_priv->max_delay = max_freq; |
7422 | dev_priv->min_delay = min_freq; |
7392 | dev_priv->min_delay = min_freq; |
7423 | dev_priv->cur_delay = cur_freq; |
7393 | dev_priv->cur_delay = cur_freq; |
7424 | 7394 | ||
7425 | /* requires MSI enabled */ |
7395 | /* requires MSI enabled */ |
7426 | I915_WRITE(GEN6_PMIER, |
7396 | I915_WRITE(GEN6_PMIER, |
7427 | GEN6_PM_MBOX_EVENT | |
7397 | GEN6_PM_MBOX_EVENT | |
7428 | GEN6_PM_THERMAL_EVENT | |
7398 | GEN6_PM_THERMAL_EVENT | |
7429 | GEN6_PM_RP_DOWN_TIMEOUT | |
7399 | GEN6_PM_RP_DOWN_TIMEOUT | |
7430 | GEN6_PM_RP_UP_THRESHOLD | |
7400 | GEN6_PM_RP_UP_THRESHOLD | |
7431 | GEN6_PM_RP_DOWN_THRESHOLD | |
7401 | GEN6_PM_RP_DOWN_THRESHOLD | |
7432 | GEN6_PM_RP_UP_EI_EXPIRED | |
7402 | GEN6_PM_RP_UP_EI_EXPIRED | |
7433 | GEN6_PM_RP_DOWN_EI_EXPIRED); |
7403 | GEN6_PM_RP_DOWN_EI_EXPIRED); |
7434 | // spin_lock_irq(&dev_priv->rps_lock); |
7404 | // spin_lock_irq(&dev_priv->rps_lock); |
7435 | // WARN_ON(dev_priv->pm_iir != 0); |
7405 | // WARN_ON(dev_priv->pm_iir != 0); |
7436 | I915_WRITE(GEN6_PMIMR, 0); |
7406 | I915_WRITE(GEN6_PMIMR, 0); |
7437 | // spin_unlock_irq(&dev_priv->rps_lock); |
7407 | // spin_unlock_irq(&dev_priv->rps_lock); |
7438 | /* enable all PM interrupts */ |
7408 | /* enable all PM interrupts */ |
7439 | I915_WRITE(GEN6_PMINTRMSK, 0); |
7409 | I915_WRITE(GEN6_PMINTRMSK, 0); |
7440 | 7410 | ||
7441 | gen6_gt_force_wake_put(dev_priv); |
7411 | gen6_gt_force_wake_put(dev_priv); |
7442 | mutex_unlock(&dev_priv->dev->struct_mutex); |
7412 | mutex_unlock(&dev_priv->dev->struct_mutex); |
7443 | } |
7413 | } |
7444 | 7414 | ||
/*
 * Program the GPU-frequency to ring-frequency mapping table on Gen6.
 *
 * For each supported GPU frequency step, writes a reference IA (CPU)
 * frequency into the PCODE min-frequency table; the PCU uses it to pick
 * a matching ring/memory frequency.  The cpufreq-based max IA frequency
 * query is stubbed out in this port and replaced with a fixed 3 GHz.
 */
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
	int min_freq = 15;
	int gpu_freq, ia_freq, max_ia_freq;
	int scaling_factor = 180;

//	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
//	if (!max_ia_freq)
		max_ia_freq = 3000000; //tsc_khz;

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	mutex_lock(&dev_priv->dev->struct_mutex);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
	     gpu_freq--) {
		int diff = dev_priv->max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		/* Write one table entry through the PCODE mailbox */
		I915_WRITE(GEN6_PCODE_DATA,
			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
			   gpu_freq);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 10)) {
			DRM_ERROR("pcode write of freq table timed out\n");
			continue;
		}
	}

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
7466 | } |
7497 | 7467 | ||
/*
 * Apply the Ironlake clock-gating workarounds.
 *
 * Disables the clock-gating units that must stay off for FBC and CxSR to
 * work, sets the chicken bits required for memory self-refresh, clears
 * the LP watermarks, and (on Ironlake-M) sets the additional bits the
 * hardware documentation requires for FBC.
 */
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
		DPFCRUNIT_CLOCK_GATE_DISABLE |
		DPFDUNIT_CLOCK_GATE_DISABLE;
	/* Required for CxSR */
	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	I915_WRITE(ILK_DSPCLK_GATE,
		   (I915_READ(ILK_DSPCLK_GATE) |
		    ILK_DPARB_CLK_GATE));
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	/* Clear all three LP watermarks */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
		I915_WRITE(ILK_DSPCLK_GATE,
			   I915_READ(ILK_DSPCLK_GATE) |
			   ILK_DPFC_DIS1 |
			   ILK_DPFC_DIS2 |
			   ILK_CLK_FBC);
	}

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);
}
7566 | 7536 | ||
/*
 * gen6_init_clock_gating - Sandy Bridge (gen6) clock-gating and
 * chicken-bit workarounds, applied once at display initialization.
 *
 * NOTE(review): the write ordering below follows the original bring-up
 * sequence; do not reorder without checking the BSpec.
 */
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);

	/* Read-modify-write: select ELPIN 409 in the chicken register. */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* Clear the LP watermarks; the update_wm hook programs them later. */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE,
		   I915_READ(ILK_DSPCLK_GATE) |
		   ILK_DPARB_CLK_GATE |
		   ILK_DPFD_CLK_GATE);

	/* Disable trickle feed on every pipe, then latch the plane change. */
	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}
}
7593 | } |
7624 | 7594 | ||
7625 | static void ivybridge_init_clock_gating(struct drm_device *dev) |
7595 | static void ivybridge_init_clock_gating(struct drm_device *dev) |
7626 | { |
7596 | { |
7627 | struct drm_i915_private *dev_priv = dev->dev_private; |
7597 | struct drm_i915_private *dev_priv = dev->dev_private; |
7628 | int pipe; |
7598 | int pipe; |
7629 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; |
7599 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; |
7630 | 7600 | ||
7631 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
7601 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
7632 | 7602 | ||
7633 | I915_WRITE(WM3_LP_ILK, 0); |
7603 | I915_WRITE(WM3_LP_ILK, 0); |
7634 | I915_WRITE(WM2_LP_ILK, 0); |
7604 | I915_WRITE(WM2_LP_ILK, 0); |
7635 | I915_WRITE(WM1_LP_ILK, 0); |
7605 | I915_WRITE(WM1_LP_ILK, 0); |
7636 | 7606 | ||
7637 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); |
7607 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); |
7638 | 7608 | ||
7639 | I915_WRITE(IVB_CHICKEN3, |
7609 | I915_WRITE(IVB_CHICKEN3, |
7640 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | |
7610 | CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | |
7641 | CHICKEN3_DGMG_DONE_FIX_DISABLE); |
7611 | CHICKEN3_DGMG_DONE_FIX_DISABLE); |
7642 | 7612 | ||
7643 | for_each_pipe(pipe) { |
7613 | for_each_pipe(pipe) { |
7644 | I915_WRITE(DSPCNTR(pipe), |
7614 | I915_WRITE(DSPCNTR(pipe), |
7645 | I915_READ(DSPCNTR(pipe)) | |
7615 | I915_READ(DSPCNTR(pipe)) | |
7646 | DISPPLANE_TRICKLE_FEED_DISABLE); |
7616 | DISPPLANE_TRICKLE_FEED_DISABLE); |
7647 | intel_flush_display_plane(dev_priv, pipe); |
7617 | intel_flush_display_plane(dev_priv, pipe); |
7648 | } |
7618 | } |
7649 | } |
7619 | } |
7650 | 7620 | ||
7651 | static void g4x_init_clock_gating(struct drm_device *dev) |
7621 | static void g4x_init_clock_gating(struct drm_device *dev) |
7652 | { |
7622 | { |
7653 | struct drm_i915_private *dev_priv = dev->dev_private; |
7623 | struct drm_i915_private *dev_priv = dev->dev_private; |
7654 | uint32_t dspclk_gate; |
7624 | uint32_t dspclk_gate; |
7655 | 7625 | ||
7656 | I915_WRITE(RENCLK_GATE_D1, 0); |
7626 | I915_WRITE(RENCLK_GATE_D1, 0); |
7657 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | |
7627 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | |
7658 | GS_UNIT_CLOCK_GATE_DISABLE | |
7628 | GS_UNIT_CLOCK_GATE_DISABLE | |
7659 | CL_UNIT_CLOCK_GATE_DISABLE); |
7629 | CL_UNIT_CLOCK_GATE_DISABLE); |
7660 | I915_WRITE(RAMCLK_GATE_D, 0); |
7630 | I915_WRITE(RAMCLK_GATE_D, 0); |
7661 | dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | |
7631 | dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | |
7662 | OVRUNIT_CLOCK_GATE_DISABLE | |
7632 | OVRUNIT_CLOCK_GATE_DISABLE | |
7663 | OVCUNIT_CLOCK_GATE_DISABLE; |
7633 | OVCUNIT_CLOCK_GATE_DISABLE; |
7664 | if (IS_GM45(dev)) |
7634 | if (IS_GM45(dev)) |
7665 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; |
7635 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; |
7666 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); |
7636 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); |
7667 | } |
7637 | } |
7668 | 7638 | ||
/*
 * crestline_init_clock_gating - 965GM (Crestline) clock-gating setup.
 * Disables RCC render clock gating and clears the remaining gating
 * controls; DEUC is written with the 16-bit accessor.
 */
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	/* DEUC is a 16-bit register, hence I915_WRITE16. */
	I915_WRITE16(DEUC, 0);
}
7648 | } |
7679 | 7649 | ||
/*
 * broadwater_init_clock_gating - 965G (Broadwater) clock-gating setup.
 * Disables render clock gating for the RCZ/RCC/RCPB/ISC/FBC units and
 * clears RENCLK_GATE_D2.
 */
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
7660 | } |
7691 | 7661 | ||
7692 | static void gen3_init_clock_gating(struct drm_device *dev) |
7662 | static void gen3_init_clock_gating(struct drm_device *dev) |
7693 | { |
7663 | { |
7694 | struct drm_i915_private *dev_priv = dev->dev_private; |
7664 | struct drm_i915_private *dev_priv = dev->dev_private; |
7695 | u32 dstate = I915_READ(D_STATE); |
7665 | u32 dstate = I915_READ(D_STATE); |
7696 | 7666 | ||
7697 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | |
7667 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | |
7698 | DSTATE_DOT_CLOCK_GATING; |
7668 | DSTATE_DOT_CLOCK_GATING; |
7699 | I915_WRITE(D_STATE, dstate); |
7669 | I915_WRITE(D_STATE, dstate); |
7700 | } |
7670 | } |
7701 | 7671 | ||
/* i85x_init_clock_gating - 855GM: disable SV render clock gating only. */
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
7677 | } |
7708 | 7678 | ||
/* i830_init_clock_gating - 830: disable overlay-unit display clock gating. */
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
7684 | } |
7715 | 7685 | ||
/* ibx_init_clock_gating - Ibex Peak PCH clock-gating setup. */
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
7696 | } |
7727 | 7697 | ||
7728 | static void cpt_init_clock_gating(struct drm_device *dev) |
7698 | static void cpt_init_clock_gating(struct drm_device *dev) |
7729 | { |
7699 | { |
7730 | struct drm_i915_private *dev_priv = dev->dev_private; |
7700 | struct drm_i915_private *dev_priv = dev->dev_private; |
7731 | int pipe; |
7701 | int pipe; |
7732 | 7702 | ||
7733 | /* |
7703 | /* |
7734 | * On Ibex Peak and Cougar Point, we need to disable clock |
7704 | * On Ibex Peak and Cougar Point, we need to disable clock |
7735 | * gating for the panel power sequencer or it will fail to |
7705 | * gating for the panel power sequencer or it will fail to |
7736 | * start up when no ports are active. |
7706 | * start up when no ports are active. |
7737 | */ |
7707 | */ |
7738 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); |
7708 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); |
7739 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | |
7709 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | |
7740 | DPLS_EDP_PPS_FIX_DIS); |
7710 | DPLS_EDP_PPS_FIX_DIS); |
7741 | /* Without this, mode sets may fail silently on FDI */ |
7711 | /* Without this, mode sets may fail silently on FDI */ |
7742 | for_each_pipe(pipe) |
7712 | for_each_pipe(pipe) |
7743 | I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); |
7713 | I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); |
7744 | } |
7714 | } |
7745 | 7715 | ||
/*
 * ironlake_teardown_rc6 - release the RC6 render and power context pages.
 *
 * The unpin/unreference calls are commented out in this port (GEM object
 * management is not wired up here); only the pointers are cleared so a
 * later setup attempt starts from a known state.
 */
static void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->renderctx) {
//		i915_gem_object_unpin(dev_priv->renderctx);
//		drm_gem_object_unreference(&dev_priv->renderctx->base);
		dev_priv->renderctx = NULL;
	}

	if (dev_priv->pwrctx) {
//		i915_gem_object_unpin(dev_priv->pwrctx);
//		drm_gem_object_unreference(&dev_priv->pwrctx->base);
		dev_priv->pwrctx = NULL;
	}
}
7762 | 7732 | ||
/*
 * ironlake_disable_rc6 - take the GPU out of RC6 and tear down the
 * power/render contexts.
 *
 * If a power context is armed (PWRCTXA reads non-zero), force an RC6
 * exit via RCX_SW_EXIT, poll RSTDBYCTL until the render standby state
 * machine reports "on" (wait_for timeout of 50 — presumably ms, per
 * the usual wait_for contract; confirm against the macro), then disarm
 * PWRCTXA and restore RSTDBYCTL.
 */
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		/* Disarm the power context and flush the write. */
		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		/* Allow RC6 entry again and flush. */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}

	ironlake_teardown_rc6(dev);
}
7751 | } |
7782 | 7752 | ||
7783 | static int ironlake_setup_rc6(struct drm_device *dev) |
7753 | static int ironlake_setup_rc6(struct drm_device *dev) |
7784 | { |
7754 | { |
7785 | struct drm_i915_private *dev_priv = dev->dev_private; |
7755 | struct drm_i915_private *dev_priv = dev->dev_private; |
7786 | 7756 | ||
7787 | if (dev_priv->renderctx == NULL) |
7757 | if (dev_priv->renderctx == NULL) |
7788 | // dev_priv->renderctx = intel_alloc_context_page(dev); |
7758 | // dev_priv->renderctx = intel_alloc_context_page(dev); |
7789 | if (!dev_priv->renderctx) |
7759 | if (!dev_priv->renderctx) |
7790 | return -ENOMEM; |
7760 | return -ENOMEM; |
7791 | 7761 | ||
7792 | if (dev_priv->pwrctx == NULL) |
7762 | if (dev_priv->pwrctx == NULL) |
7793 | // dev_priv->pwrctx = intel_alloc_context_page(dev); |
7763 | // dev_priv->pwrctx = intel_alloc_context_page(dev); |
7794 | if (!dev_priv->pwrctx) { |
7764 | if (!dev_priv->pwrctx) { |
7795 | ironlake_teardown_rc6(dev); |
7765 | ironlake_teardown_rc6(dev); |
7796 | return -ENOMEM; |
7766 | return -ENOMEM; |
7797 | } |
7767 | } |
7798 | 7768 | ||
7799 | return 0; |
7769 | return 0; |
7800 | } |
7770 | } |
7801 | 7771 | ||
/*
 * ironlake_enable_rc6 - arm RC6 render power saving on Ironlake.
 *
 * Sets up the RC6 context pages under struct_mutex, then points PWRCTXA
 * at the power context and clears RCX_SW_EXIT so the hardware may enter
 * RC6.  The ring-buffer context-save emission is compiled out (#if 0)
 * in this port.
 */
void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	mutex_lock(&dev->struct_mutex);
	ret = ironlake_setup_rc6(dev);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
#if 0
	ret = BEGIN_LP_RING(6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}

	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	OUT_RING(MI_SET_CONTEXT);
	OUT_RING(dev_priv->renderctx->gtt_offset |
		 MI_MM_SPACE_GTT |
		 MI_SAVE_EXT_STATE_EN |
		 MI_RESTORE_EXT_STATE_EN |
		 MI_RESTORE_INHIBIT);
	OUT_RING(MI_SUSPEND_FLUSH);
	OUT_RING(MI_NOOP);
	OUT_RING(MI_FLUSH);
	ADVANCE_LP_RING();

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_wait_ring_idle(LP_RING(dev_priv));
	if (ret) {
		DRM_ERROR("failed to enable ironlake power power savings\n");
		ironlake_teardown_rc6(dev);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
#endif

	/* Arm the power context page and permit RC6 entry. */
	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
	mutex_unlock(&dev->struct_mutex);
}
7831 | } |
7862 | 7832 | ||
7863 | void intel_init_clock_gating(struct drm_device *dev) |
7833 | void intel_init_clock_gating(struct drm_device *dev) |
7864 | { |
7834 | { |
7865 | struct drm_i915_private *dev_priv = dev->dev_private; |
7835 | struct drm_i915_private *dev_priv = dev->dev_private; |
7866 | 7836 | ||
7867 | dev_priv->display.init_clock_gating(dev); |
7837 | dev_priv->display.init_clock_gating(dev); |
7868 | 7838 | ||
7869 | if (dev_priv->display.init_pch_clock_gating) |
7839 | if (dev_priv->display.init_pch_clock_gating) |
7870 | dev_priv->display.init_pch_clock_gating(dev); |
7840 | dev_priv->display.init_pch_clock_gating(dev); |
7871 | } |
7841 | } |
7872 | 7842 | ||
7873 | /* Set up chip specific display functions */ |
7843 | /* Set up chip specific display functions */ |
7874 | static void intel_init_display(struct drm_device *dev) |
7844 | static void intel_init_display(struct drm_device *dev) |
7875 | { |
7845 | { |
7876 | struct drm_i915_private *dev_priv = dev->dev_private; |
7846 | struct drm_i915_private *dev_priv = dev->dev_private; |
7877 | 7847 | ||
7878 | /* We always want a DPMS function */ |
7848 | /* We always want a DPMS function */ |
7879 | if (HAS_PCH_SPLIT(dev)) { |
7849 | if (HAS_PCH_SPLIT(dev)) { |
7880 | dev_priv->display.dpms = ironlake_crtc_dpms; |
7850 | dev_priv->display.dpms = ironlake_crtc_dpms; |
7881 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; |
7851 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; |
7882 | dev_priv->display.update_plane = ironlake_update_plane; |
7852 | dev_priv->display.update_plane = ironlake_update_plane; |
7883 | } else { |
7853 | } else { |
7884 | dev_priv->display.dpms = i9xx_crtc_dpms; |
7854 | dev_priv->display.dpms = i9xx_crtc_dpms; |
7885 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; |
7855 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; |
7886 | dev_priv->display.update_plane = i9xx_update_plane; |
7856 | dev_priv->display.update_plane = i9xx_update_plane; |
7887 | } |
7857 | } |
7888 | 7858 | ||
7889 | if (I915_HAS_FBC(dev)) { |
7859 | if (I915_HAS_FBC(dev)) { |
7890 | if (HAS_PCH_SPLIT(dev)) { |
7860 | if (HAS_PCH_SPLIT(dev)) { |
7891 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; |
7861 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; |
7892 | dev_priv->display.enable_fbc = ironlake_enable_fbc; |
7862 | dev_priv->display.enable_fbc = ironlake_enable_fbc; |
7893 | dev_priv->display.disable_fbc = ironlake_disable_fbc; |
7863 | dev_priv->display.disable_fbc = ironlake_disable_fbc; |
7894 | } else if (IS_GM45(dev)) { |
7864 | } else if (IS_GM45(dev)) { |
7895 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
7865 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
7896 | dev_priv->display.enable_fbc = g4x_enable_fbc; |
7866 | dev_priv->display.enable_fbc = g4x_enable_fbc; |
7897 | dev_priv->display.disable_fbc = g4x_disable_fbc; |
7867 | dev_priv->display.disable_fbc = g4x_disable_fbc; |
7898 | } else if (IS_CRESTLINE(dev)) { |
7868 | } else if (IS_CRESTLINE(dev)) { |
7899 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; |
7869 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; |
7900 | dev_priv->display.enable_fbc = i8xx_enable_fbc; |
7870 | dev_priv->display.enable_fbc = i8xx_enable_fbc; |
7901 | dev_priv->display.disable_fbc = i8xx_disable_fbc; |
7871 | dev_priv->display.disable_fbc = i8xx_disable_fbc; |
7902 | } |
7872 | } |
7903 | /* 855GM needs testing */ |
7873 | /* 855GM needs testing */ |
7904 | } |
7874 | } |
7905 | 7875 | ||
7906 | /* Returns the core display clock speed */ |
7876 | /* Returns the core display clock speed */ |
7907 | if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) |
7877 | if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) |
7908 | dev_priv->display.get_display_clock_speed = |
7878 | dev_priv->display.get_display_clock_speed = |
7909 | i945_get_display_clock_speed; |
7879 | i945_get_display_clock_speed; |
7910 | else if (IS_I915G(dev)) |
7880 | else if (IS_I915G(dev)) |
7911 | dev_priv->display.get_display_clock_speed = |
7881 | dev_priv->display.get_display_clock_speed = |
7912 | i915_get_display_clock_speed; |
7882 | i915_get_display_clock_speed; |
7913 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) |
7883 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) |
7914 | dev_priv->display.get_display_clock_speed = |
7884 | dev_priv->display.get_display_clock_speed = |
7915 | i9xx_misc_get_display_clock_speed; |
7885 | i9xx_misc_get_display_clock_speed; |
7916 | else if (IS_I915GM(dev)) |
7886 | else if (IS_I915GM(dev)) |
7917 | dev_priv->display.get_display_clock_speed = |
7887 | dev_priv->display.get_display_clock_speed = |
7918 | i915gm_get_display_clock_speed; |
7888 | i915gm_get_display_clock_speed; |
7919 | else if (IS_I865G(dev)) |
7889 | else if (IS_I865G(dev)) |
7920 | dev_priv->display.get_display_clock_speed = |
7890 | dev_priv->display.get_display_clock_speed = |
7921 | i865_get_display_clock_speed; |
7891 | i865_get_display_clock_speed; |
7922 | else if (IS_I85X(dev)) |
7892 | else if (IS_I85X(dev)) |
7923 | dev_priv->display.get_display_clock_speed = |
7893 | dev_priv->display.get_display_clock_speed = |
7924 | i855_get_display_clock_speed; |
7894 | i855_get_display_clock_speed; |
7925 | else /* 852, 830 */ |
7895 | else /* 852, 830 */ |
7926 | dev_priv->display.get_display_clock_speed = |
7896 | dev_priv->display.get_display_clock_speed = |
7927 | i830_get_display_clock_speed; |
7897 | i830_get_display_clock_speed; |
7928 | 7898 | ||
7929 | /* For FIFO watermark updates */ |
7899 | /* For FIFO watermark updates */ |
7930 | if (HAS_PCH_SPLIT(dev)) { |
7900 | if (HAS_PCH_SPLIT(dev)) { |
7931 | dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; |
7901 | dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; |
7932 | dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; |
7902 | dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; |
7933 | 7903 | ||
7934 | /* IVB configs may use multi-threaded forcewake */ |
7904 | /* IVB configs may use multi-threaded forcewake */ |
7935 | if (IS_IVYBRIDGE(dev)) { |
7905 | if (IS_IVYBRIDGE(dev)) { |
7936 | u32 ecobus; |
7906 | u32 ecobus; |
7937 | 7907 | ||
7938 | /* A small trick here - if the bios hasn't configured MT forcewake, |
7908 | /* A small trick here - if the bios hasn't configured MT forcewake, |
7939 | * and if the device is in RC6, then force_wake_mt_get will not wake |
7909 | * and if the device is in RC6, then force_wake_mt_get will not wake |
7940 | * the device and the ECOBUS read will return zero. Which will be |
7910 | * the device and the ECOBUS read will return zero. Which will be |
7941 | * (correctly) interpreted by the test below as MT forcewake being |
7911 | * (correctly) interpreted by the test below as MT forcewake being |
7942 | * disabled. |
7912 | * disabled. |
7943 | */ |
7913 | */ |
7944 | mutex_lock(&dev->struct_mutex); |
7914 | mutex_lock(&dev->struct_mutex); |
7945 | __gen6_gt_force_wake_mt_get(dev_priv); |
7915 | __gen6_gt_force_wake_mt_get(dev_priv); |
7946 | ecobus = I915_READ_NOTRACE(ECOBUS); |
7916 | ecobus = I915_READ_NOTRACE(ECOBUS); |
7947 | __gen6_gt_force_wake_mt_put(dev_priv); |
7917 | __gen6_gt_force_wake_mt_put(dev_priv); |
7948 | mutex_unlock(&dev->struct_mutex); |
7918 | mutex_unlock(&dev->struct_mutex); |
7949 | 7919 | ||
7950 | if (ecobus & FORCEWAKE_MT_ENABLE) { |
7920 | if (ecobus & FORCEWAKE_MT_ENABLE) { |
7951 | DRM_DEBUG_KMS("Using MT version of forcewake\n"); |
7921 | DRM_DEBUG_KMS("Using MT version of forcewake\n"); |
7952 | dev_priv->display.force_wake_get = |
7922 | dev_priv->display.force_wake_get = |
7953 | __gen6_gt_force_wake_mt_get; |
7923 | __gen6_gt_force_wake_mt_get; |
7954 | dev_priv->display.force_wake_put = |
7924 | dev_priv->display.force_wake_put = |
7955 | __gen6_gt_force_wake_mt_put; |
7925 | __gen6_gt_force_wake_mt_put; |
7956 | } |
7926 | } |
7957 | } |
7927 | } |
7958 | 7928 | ||
7959 | if (HAS_PCH_IBX(dev)) |
7929 | if (HAS_PCH_IBX(dev)) |
7960 | dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; |
7930 | dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; |
7961 | else if (HAS_PCH_CPT(dev)) |
7931 | else if (HAS_PCH_CPT(dev)) |
7962 | dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; |
7932 | dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; |
7963 | 7933 | ||
7964 | if (IS_GEN5(dev)) { |
7934 | if (IS_GEN5(dev)) { |
7965 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) |
7935 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) |
7966 | dev_priv->display.update_wm = ironlake_update_wm; |
7936 | dev_priv->display.update_wm = ironlake_update_wm; |
7967 | else { |
7937 | else { |
7968 | DRM_DEBUG_KMS("Failed to get proper latency. " |
7938 | DRM_DEBUG_KMS("Failed to get proper latency. " |
7969 | "Disable CxSR\n"); |
7939 | "Disable CxSR\n"); |
7970 | dev_priv->display.update_wm = NULL; |
7940 | dev_priv->display.update_wm = NULL; |
7971 | } |
7941 | } |
7972 | dev_priv->display.fdi_link_train = ironlake_fdi_link_train; |
7942 | dev_priv->display.fdi_link_train = ironlake_fdi_link_train; |
7973 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; |
7943 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; |
7974 | dev_priv->display.write_eld = ironlake_write_eld; |
7944 | dev_priv->display.write_eld = ironlake_write_eld; |
7975 | } else if (IS_GEN6(dev)) { |
7945 | } else if (IS_GEN6(dev)) { |
7976 | if (SNB_READ_WM0_LATENCY()) { |
7946 | if (SNB_READ_WM0_LATENCY()) { |
7977 | dev_priv->display.update_wm = sandybridge_update_wm; |
7947 | dev_priv->display.update_wm = sandybridge_update_wm; |
7978 | dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; |
7948 | dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; |
7979 | } else { |
7949 | } else { |
7980 | DRM_DEBUG_KMS("Failed to read display plane latency. " |
7950 | DRM_DEBUG_KMS("Failed to read display plane latency. " |
7981 | "Disable CxSR\n"); |
7951 | "Disable CxSR\n"); |
7982 | dev_priv->display.update_wm = NULL; |
7952 | dev_priv->display.update_wm = NULL; |
7983 | } |
7953 | } |
7984 | dev_priv->display.fdi_link_train = gen6_fdi_link_train; |
7954 | dev_priv->display.fdi_link_train = gen6_fdi_link_train; |
7985 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; |
7955 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; |
7986 | dev_priv->display.write_eld = ironlake_write_eld; |
7956 | dev_priv->display.write_eld = ironlake_write_eld; |
7987 | } else if (IS_IVYBRIDGE(dev)) { |
7957 | } else if (IS_IVYBRIDGE(dev)) { |
7988 | /* FIXME: detect B0+ stepping and use auto training */ |
7958 | /* FIXME: detect B0+ stepping and use auto training */ |
7989 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; |
7959 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; |
7990 | if (SNB_READ_WM0_LATENCY()) { |
7960 | if (SNB_READ_WM0_LATENCY()) { |
7991 | dev_priv->display.update_wm = sandybridge_update_wm; |
7961 | dev_priv->display.update_wm = sandybridge_update_wm; |
7992 | dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; |
7962 | dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; |
7993 | } else { |
7963 | } else { |
7994 | DRM_DEBUG_KMS("Failed to read display plane latency. " |
7964 | DRM_DEBUG_KMS("Failed to read display plane latency. " |
7995 | "Disable CxSR\n"); |
7965 | "Disable CxSR\n"); |
7996 | dev_priv->display.update_wm = NULL; |
7966 | dev_priv->display.update_wm = NULL; |
7997 | } |
7967 | } |
7998 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; |
7968 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; |
7999 | dev_priv->display.write_eld = ironlake_write_eld; |
7969 | dev_priv->display.write_eld = ironlake_write_eld; |
8000 | } else |
7970 | } else |
8001 | dev_priv->display.update_wm = NULL; |
7971 | dev_priv->display.update_wm = NULL; |
8002 | } else if (IS_PINEVIEW(dev)) { |
7972 | } else if (IS_PINEVIEW(dev)) { |
8003 | if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), |
7973 | if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), |
8004 | dev_priv->is_ddr3, |
7974 | dev_priv->is_ddr3, |
8005 | dev_priv->fsb_freq, |
7975 | dev_priv->fsb_freq, |
8006 | dev_priv->mem_freq)) { |
7976 | dev_priv->mem_freq)) { |
8007 | DRM_INFO("failed to find known CxSR latency " |
7977 | DRM_INFO("failed to find known CxSR latency " |
8008 | "(found ddr%s fsb freq %d, mem freq %d), " |
7978 | "(found ddr%s fsb freq %d, mem freq %d), " |
8009 | "disabling CxSR\n", |
7979 | "disabling CxSR\n", |
8010 | (dev_priv->is_ddr3 == 1) ? "3" : "2", |
7980 | (dev_priv->is_ddr3 == 1) ? "3" : "2", |
8011 | dev_priv->fsb_freq, dev_priv->mem_freq); |
7981 | dev_priv->fsb_freq, dev_priv->mem_freq); |
8012 | /* Disable CxSR and never update its watermark again */ |
7982 | /* Disable CxSR and never update its watermark again */ |
8013 | pineview_disable_cxsr(dev); |
7983 | pineview_disable_cxsr(dev); |
8014 | dev_priv->display.update_wm = NULL; |
7984 | dev_priv->display.update_wm = NULL; |
8015 | } else |
7985 | } else |
8016 | dev_priv->display.update_wm = pineview_update_wm; |
7986 | dev_priv->display.update_wm = pineview_update_wm; |
8017 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
7987 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
8018 | } else if (IS_G4X(dev)) { |
7988 | } else if (IS_G4X(dev)) { |
8019 | dev_priv->display.write_eld = g4x_write_eld; |
7989 | dev_priv->display.write_eld = g4x_write_eld; |
8020 | dev_priv->display.update_wm = g4x_update_wm; |
7990 | dev_priv->display.update_wm = g4x_update_wm; |
8021 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; |
7991 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; |
8022 | } else if (IS_GEN4(dev)) { |
7992 | } else if (IS_GEN4(dev)) { |
8023 | dev_priv->display.update_wm = i965_update_wm; |
7993 | dev_priv->display.update_wm = i965_update_wm; |
8024 | if (IS_CRESTLINE(dev)) |
7994 | if (IS_CRESTLINE(dev)) |
8025 | dev_priv->display.init_clock_gating = crestline_init_clock_gating; |
7995 | dev_priv->display.init_clock_gating = crestline_init_clock_gating; |
8026 | else if (IS_BROADWATER(dev)) |
7996 | else if (IS_BROADWATER(dev)) |
8027 | dev_priv->display.init_clock_gating = broadwater_init_clock_gating; |
7997 | dev_priv->display.init_clock_gating = broadwater_init_clock_gating; |
8028 | } else if (IS_GEN3(dev)) { |
7998 | } else if (IS_GEN3(dev)) { |
8029 | dev_priv->display.update_wm = i9xx_update_wm; |
7999 | dev_priv->display.update_wm = i9xx_update_wm; |
8030 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; |
8000 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; |
8031 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
8001 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
8032 | } else if (IS_I865G(dev)) { |
8002 | } else if (IS_I865G(dev)) { |
8033 | dev_priv->display.update_wm = i830_update_wm; |
8003 | dev_priv->display.update_wm = i830_update_wm; |
8034 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; |
8004 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; |
8035 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
8005 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
8036 | } else if (IS_I85X(dev)) { |
8006 | } else if (IS_I85X(dev)) { |
8037 | dev_priv->display.update_wm = i9xx_update_wm; |
8007 | dev_priv->display.update_wm = i9xx_update_wm; |
8038 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; |
8008 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; |
8039 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; |
8009 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; |
8040 | } else { |
8010 | } else { |
8041 | dev_priv->display.update_wm = i830_update_wm; |
8011 | dev_priv->display.update_wm = i830_update_wm; |
8042 | dev_priv->display.init_clock_gating = i830_init_clock_gating; |
8012 | dev_priv->display.init_clock_gating = i830_init_clock_gating; |
8043 | if (IS_845G(dev)) |
8013 | if (IS_845G(dev)) |
8044 | dev_priv->display.get_fifo_size = i845_get_fifo_size; |
8014 | dev_priv->display.get_fifo_size = i845_get_fifo_size; |
8045 | else |
8015 | else |
8046 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
8016 | dev_priv->display.get_fifo_size = i830_get_fifo_size; |
8047 | } |
8017 | } |
8048 | 8018 | ||
8049 | /* Default just returns -ENODEV to indicate unsupported */ |
8019 | /* Default just returns -ENODEV to indicate unsupported */ |
8050 | // dev_priv->display.queue_flip = intel_default_queue_flip; |
8020 | // dev_priv->display.queue_flip = intel_default_queue_flip; |
8051 | 8021 | ||
8052 | #if 0 |
8022 | #if 0 |
8053 | switch (INTEL_INFO(dev)->gen) { |
8023 | switch (INTEL_INFO(dev)->gen) { |
8054 | case 2: |
8024 | case 2: |
8055 | dev_priv->display.queue_flip = intel_gen2_queue_flip; |
8025 | dev_priv->display.queue_flip = intel_gen2_queue_flip; |
8056 | break; |
8026 | break; |
8057 | 8027 | ||
8058 | case 3: |
8028 | case 3: |
8059 | dev_priv->display.queue_flip = intel_gen3_queue_flip; |
8029 | dev_priv->display.queue_flip = intel_gen3_queue_flip; |
8060 | break; |
8030 | break; |
8061 | 8031 | ||
8062 | case 4: |
8032 | case 4: |
8063 | case 5: |
8033 | case 5: |
8064 | dev_priv->display.queue_flip = intel_gen4_queue_flip; |
8034 | dev_priv->display.queue_flip = intel_gen4_queue_flip; |
8065 | break; |
8035 | break; |
8066 | 8036 | ||
8067 | case 6: |
8037 | case 6: |
8068 | dev_priv->display.queue_flip = intel_gen6_queue_flip; |
8038 | dev_priv->display.queue_flip = intel_gen6_queue_flip; |
8069 | break; |
8039 | break; |
8070 | case 7: |
8040 | case 7: |
8071 | dev_priv->display.queue_flip = intel_gen7_queue_flip; |
8041 | dev_priv->display.queue_flip = intel_gen7_queue_flip; |
8072 | break; |
8042 | break; |
8073 | } |
8043 | } |
8074 | #endif |
8044 | #endif |
8075 | } |
8045 | } |
8076 | 8046 | ||
8077 | /* |
8047 | /* |
8078 | * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, |
8048 | * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, |
8079 | * resume, or other times. This quirk makes sure that's the case for |
8049 | * resume, or other times. This quirk makes sure that's the case for |
8080 | * affected systems. |
8050 | * affected systems. |
8081 | */ |
8051 | */ |
8082 | static void quirk_pipea_force(struct drm_device *dev) |
8052 | static void quirk_pipea_force(struct drm_device *dev) |
8083 | { |
8053 | { |
8084 | struct drm_i915_private *dev_priv = dev->dev_private; |
8054 | struct drm_i915_private *dev_priv = dev->dev_private; |
8085 | 8055 | ||
8086 | dev_priv->quirks |= QUIRK_PIPEA_FORCE; |
8056 | dev_priv->quirks |= QUIRK_PIPEA_FORCE; |
8087 | DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); |
8057 | DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); |
8088 | } |
8058 | } |
8089 | 8059 | ||
8090 | /* |
8060 | /* |
8091 | * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason |
8061 | * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason |
8092 | */ |
8062 | */ |
8093 | static void quirk_ssc_force_disable(struct drm_device *dev) |
8063 | static void quirk_ssc_force_disable(struct drm_device *dev) |
8094 | { |
8064 | { |
8095 | struct drm_i915_private *dev_priv = dev->dev_private; |
8065 | struct drm_i915_private *dev_priv = dev->dev_private; |
8096 | dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; |
8066 | dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; |
8097 | } |
8067 | } |
8098 | 8068 | ||
/*
 * One entry in the quirk table: a PCI device/subsystem triple to match
 * (PCI_ANY_ID wildcards the subsystem fields) and the hook to run on match.
 */
struct intel_quirk {
	int device;			/* PCI device id of the GPU */
	int subsystem_vendor;		/* subsystem vendor id, or PCI_ANY_ID */
	int subsystem_device;		/* subsystem device id, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied when the ids match */
};
8074 | }; |
8105 | 8075 | ||
8106 | struct intel_quirk intel_quirks[] = { |
8076 | struct intel_quirk intel_quirks[] = { |
8107 | /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ |
8077 | /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ |
8108 | { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, |
8078 | { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, |
8109 | /* HP Mini needs pipe A force quirk (LP: #322104) */ |
8079 | /* HP Mini needs pipe A force quirk (LP: #322104) */ |
8110 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, |
8080 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, |
8111 | 8081 | ||
8112 | /* Thinkpad R31 needs pipe A force quirk */ |
8082 | /* Thinkpad R31 needs pipe A force quirk */ |
8113 | { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, |
8083 | { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, |
8114 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ |
8084 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ |
8115 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, |
8085 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, |
8116 | 8086 | ||
8117 | /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ |
8087 | /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ |
8118 | { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, |
8088 | { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, |
8119 | /* ThinkPad X40 needs pipe A force quirk */ |
8089 | /* ThinkPad X40 needs pipe A force quirk */ |
8120 | 8090 | ||
8121 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ |
8091 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ |
8122 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, |
8092 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, |
8123 | 8093 | ||
8124 | /* 855 & before need to leave pipe A & dpll A up */ |
8094 | /* 855 & before need to leave pipe A & dpll A up */ |
8125 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
8095 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
8126 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
8096 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
8127 | 8097 | ||
8128 | /* Lenovo U160 cannot use SSC on LVDS */ |
8098 | /* Lenovo U160 cannot use SSC on LVDS */ |
8129 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, |
8099 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, |
8130 | 8100 | ||
8131 | /* Sony Vaio Y cannot use SSC on LVDS */ |
8101 | /* Sony Vaio Y cannot use SSC on LVDS */ |
8132 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, |
8102 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, |
8133 | }; |
8103 | }; |
8134 | 8104 | ||
8135 | static void intel_init_quirks(struct drm_device *dev) |
8105 | static void intel_init_quirks(struct drm_device *dev) |
8136 | { |
8106 | { |
8137 | struct pci_dev *d = dev->pdev; |
8107 | struct pci_dev *d = dev->pdev; |
8138 | int i; |
8108 | int i; |
8139 | 8109 | ||
8140 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { |
8110 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { |
8141 | struct intel_quirk *q = &intel_quirks[i]; |
8111 | struct intel_quirk *q = &intel_quirks[i]; |
8142 | 8112 | ||
8143 | if (d->device == q->device && |
8113 | if (d->device == q->device && |
8144 | (d->subsystem_vendor == q->subsystem_vendor || |
8114 | (d->subsystem_vendor == q->subsystem_vendor || |
8145 | q->subsystem_vendor == PCI_ANY_ID) && |
8115 | q->subsystem_vendor == PCI_ANY_ID) && |
8146 | (d->subsystem_device == q->subsystem_device || |
8116 | (d->subsystem_device == q->subsystem_device || |
8147 | q->subsystem_device == PCI_ANY_ID)) |
8117 | q->subsystem_device == PCI_ANY_ID)) |
8148 | q->hook(dev); |
8118 | q->hook(dev); |
8149 | } |
8119 | } |
8150 | } |
8120 | } |
8151 | 8121 | ||
8152 | /* Disable the VGA plane that we never use */ |
8122 | /* Disable the VGA plane that we never use */ |
8153 | static void i915_disable_vga(struct drm_device *dev) |
8123 | static void i915_disable_vga(struct drm_device *dev) |
8154 | { |
8124 | { |
8155 | struct drm_i915_private *dev_priv = dev->dev_private; |
8125 | struct drm_i915_private *dev_priv = dev->dev_private; |
8156 | u8 sr1; |
8126 | u8 sr1; |
8157 | u32 vga_reg; |
8127 | u32 vga_reg; |
8158 | 8128 | ||
8159 | if (HAS_PCH_SPLIT(dev)) |
8129 | if (HAS_PCH_SPLIT(dev)) |
8160 | vga_reg = CPU_VGACNTRL; |
8130 | vga_reg = CPU_VGACNTRL; |
8161 | else |
8131 | else |
8162 | vga_reg = VGACNTRL; |
8132 | vga_reg = VGACNTRL; |
8163 | 8133 | ||
8164 | // vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); |
8134 | // vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); |
8165 | out8(VGA_SR_INDEX, 1); |
8135 | out8(VGA_SR_INDEX, 1); |
8166 | sr1 = in8(VGA_SR_DATA); |
8136 | sr1 = in8(VGA_SR_DATA); |
8167 | out8(VGA_SR_DATA,sr1 | 1<<5); |
8137 | out8(VGA_SR_DATA,sr1 | 1<<5); |
8168 | // vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); |
8138 | // vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); |
8169 | udelay(300); |
8139 | udelay(300); |
8170 | 8140 | ||
8171 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); |
8141 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); |
8172 | POSTING_READ(vga_reg); |
8142 | POSTING_READ(vga_reg); |
8173 | } |
8143 | } |
8174 | 8144 | ||
8175 | void intel_modeset_init(struct drm_device *dev) |
8145 | void intel_modeset_init(struct drm_device *dev) |
8176 | { |
8146 | { |
8177 | struct drm_i915_private *dev_priv = dev->dev_private; |
8147 | struct drm_i915_private *dev_priv = dev->dev_private; |
8178 | int i, ret; |
8148 | int i, ret; |
8179 | 8149 | ||
8180 | drm_mode_config_init(dev); |
8150 | drm_mode_config_init(dev); |
8181 | 8151 | ||
8182 | dev->mode_config.min_width = 0; |
8152 | dev->mode_config.min_width = 0; |
8183 | dev->mode_config.min_height = 0; |
8153 | dev->mode_config.min_height = 0; |
8184 | 8154 | ||
8185 | dev->mode_config.funcs = (void *)&intel_mode_funcs; |
8155 | dev->mode_config.funcs = (void *)&intel_mode_funcs; |
8186 | 8156 | ||
8187 | intel_init_quirks(dev); |
8157 | intel_init_quirks(dev); |
8188 | 8158 | ||
8189 | intel_init_display(dev); |
8159 | intel_init_display(dev); |
8190 | 8160 | ||
8191 | if (IS_GEN2(dev)) { |
8161 | if (IS_GEN2(dev)) { |
8192 | dev->mode_config.max_width = 2048; |
8162 | dev->mode_config.max_width = 2048; |
8193 | dev->mode_config.max_height = 2048; |
8163 | dev->mode_config.max_height = 2048; |
8194 | } else if (IS_GEN3(dev)) { |
8164 | } else if (IS_GEN3(dev)) { |
8195 | dev->mode_config.max_width = 4096; |
8165 | dev->mode_config.max_width = 4096; |
8196 | dev->mode_config.max_height = 4096; |
8166 | dev->mode_config.max_height = 4096; |
8197 | } else { |
8167 | } else { |
8198 | dev->mode_config.max_width = 8192; |
8168 | dev->mode_config.max_width = 8192; |
8199 | dev->mode_config.max_height = 8192; |
8169 | dev->mode_config.max_height = 8192; |
8200 | } |
8170 | } |
8201 | dev->mode_config.fb_base = get_bus_addr(); |
8171 | dev->mode_config.fb_base = get_bus_addr(); |
8202 | 8172 | ||
8203 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
8173 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
8204 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); |
8174 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); |
8205 | 8175 | ||
8206 | for (i = 0; i < dev_priv->num_pipe; i++) { |
8176 | for (i = 0; i < dev_priv->num_pipe; i++) { |
8207 | intel_crtc_init(dev, i); |
8177 | intel_crtc_init(dev, i); |
8208 | ret = intel_plane_init(dev, i); |
8178 | ret = intel_plane_init(dev, i); |
8209 | if (ret) |
8179 | if (ret) |
8210 | DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); |
8180 | DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); |
8211 | } |
8181 | } |
8212 | 8182 | ||
8213 | /* Just disable it once at startup */ |
8183 | /* Just disable it once at startup */ |
8214 | i915_disable_vga(dev); |
8184 | i915_disable_vga(dev); |
8215 | intel_setup_outputs(dev); |
8185 | intel_setup_outputs(dev); |
8216 | 8186 | ||
8217 | intel_init_clock_gating(dev); |
8187 | intel_init_clock_gating(dev); |
8218 | 8188 | ||
8219 | if (IS_IRONLAKE_M(dev)) { |
8189 | if (IS_IRONLAKE_M(dev)) { |
8220 | ironlake_enable_drps(dev); |
8190 | ironlake_enable_drps(dev); |
8221 | intel_init_emon(dev); |
8191 | intel_init_emon(dev); |
8222 | } |
8192 | } |
8223 | 8193 | ||
8224 | if (IS_GEN6(dev) || IS_GEN7(dev)) { |
8194 | if (IS_GEN6(dev) || IS_GEN7(dev)) { |
8225 | gen6_enable_rps(dev_priv); |
8195 | gen6_enable_rps(dev_priv); |
8226 | gen6_update_ring_freq(dev_priv); |
8196 | gen6_update_ring_freq(dev_priv); |
8227 | } |
8197 | } |
8228 | 8198 | ||
8229 | // INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
8199 | // INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
8230 | // setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
8200 | // setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, |
8231 | // (unsigned long)dev); |
8201 | // (unsigned long)dev); |
8232 | } |
8202 | } |
8233 | 8203 | ||
/* GEM-dependent modeset init, run once GEM itself is up. */
void intel_modeset_gem_init(struct drm_device *dev)
{
	/* RC6 power saving is only enabled on Ironlake mobile here. */
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

	/* intel_setup_overlay(dev); -- not ported */
}
8241 | 8211 | ||
8242 | 8212 | ||
8243 | /* |
8213 | /* |
8244 | * Return which encoder is currently attached for connector. |
8214 | * Return which encoder is currently attached for connector. |
8245 | */ |
8215 | */ |
8246 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) |
8216 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) |
8247 | { |
8217 | { |
8248 | return &intel_attached_encoder(connector)->base; |
8218 | return &intel_attached_encoder(connector)->base; |
8249 | } |
8219 | } |
8250 | 8220 | ||
8251 | void intel_connector_attach_encoder(struct intel_connector *connector, |
8221 | void intel_connector_attach_encoder(struct intel_connector *connector, |
8252 | struct intel_encoder *encoder) |
8222 | struct intel_encoder *encoder) |
8253 | { |
8223 | { |
8254 | connector->encoder = encoder; |
8224 | connector->encoder = encoder; |
8255 | drm_mode_connector_attach_encoder(&connector->base, |
8225 | drm_mode_connector_attach_encoder(&connector->base, |
8256 | &encoder->base); |
8226 | &encoder->base); |
8257 | }>5); |
8227 | }>5); |
8258 | //><5); |
8228 | //><5); |
8259 | //>>><>><>>31))><31))>><>><>>><>><>>><>><>><>>>>> |
8229 | //>>><>><>>31))><31))>><>><>>><>><>>><>><>><>>>>> |
8260 | >< |
8230 | >< |
8261 | > |
8231 | > |
8262 | >< |
8232 | >< |
8263 | >><>><>><>><>>>>>>><>>>>>=>>>=>=>=>=>>>>>>>>>>=>=>>>>>>>>> |
8233 | >><>><>><>><>>>>>>><>>>>>=>>>=>=>=>=>>>>>>>>>>=>=>>>>>>>>> |