i915_drv.c (Rev 6937 -> Rev 7144)
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <drm/drm_crtc_helper.h>

int init_display_kms(struct drm_device *dev);

extern int intel_agp_enabled;

static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
                          PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
        .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
                          CHV_PIPE_C_OFFSET }, \
        .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
                           CHV_TRANSCODER_C_OFFSET, }, \
        .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
                             CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
        .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }


static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
        .gen = 4, .is_broadwater = 1, .num_pipes = 2,
        .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
        .gen = 4, .is_crestline = 1, .num_pipes = 2,
        .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 3, .is_g33 = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .ring_mask = RENDER_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
        .gen = 4, .is_g4x = 1, .num_pipes = 2,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
        .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
        .gen = 5, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
        .gen = 6, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

#define GEN7_FEATURES \
        .gen = 7, .num_pipes = 3, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
        .has_fbc = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .has_llc = 1, \
        GEN_DEFAULT_PIPEOFFSETS, \
        IVB_CURSOR_OFFSETS

static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .is_mobile = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
        GEN7_FEATURES,
        .is_ivybridge = 1,
        .num_pipes = 0, /* legal, last one wins */
};
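
/*
 * Note on "last one wins" above: GEN7_FEATURES expands to a run of
 * designated initializers (.gen = 7, .num_pipes = 3, ...), and C99 lets a
 * later initializer for the same member override an earlier one, so the Q
 * variant legally zeroes .num_pipes after the macro set it to 3.  A
 * minimal illustration (not driver code):
 *
 *      struct example { int a, b; };
 *      static const struct example e = { .a = 1, .b = 2, .b = 0 };
 *
 * Here e.b is 0: the final designated initializer for .b wins.
 */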

#define VLV_FEATURES \
        .gen = 7, .num_pipes = 2, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
        .display_mmio_offset = VLV_DISPLAY_BASE, \
        GEN_DEFAULT_PIPEOFFSETS, \
        CURSOR_OFFSETS

static const struct intel_device_info intel_valleyview_m_info = {
        VLV_FEATURES,
        .is_valleyview = 1,
        .is_mobile = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
        VLV_FEATURES,
        .is_valleyview = 1,
};

#define HSW_FEATURES \
        GEN7_FEATURES, \
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
        .has_ddi = 1, \
        .has_fpga_dbg = 1

static const struct intel_device_info intel_haswell_d_info = {
        HSW_FEATURES,
        .is_haswell = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
        HSW_FEATURES,
        .is_haswell = 1,
        .is_mobile = 1,
};

static const struct intel_device_info intel_broadwell_d_info = {
        HSW_FEATURES,
        .gen = 8,
};

static const struct intel_device_info intel_broadwell_m_info = {
        HSW_FEATURES,
        .gen = 8, .is_mobile = 1,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
        HSW_FEATURES,
        .gen = 8,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
        HSW_FEATURES,
        .gen = 8, .is_mobile = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_cherryview_info = {
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .is_cherryview = 1,
        .display_mmio_offset = VLV_DISPLAY_BASE,
        GEN_CHV_PIPEOFFSETS,
        CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_info = {
        HSW_FEATURES,
        .is_skylake = 1,
        .gen = 9,
};

static const struct intel_device_info intel_skylake_gt3_info = {
        HSW_FEATURES,
        .is_skylake = 1,
        .gen = 9,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broxton_info = {
        .is_preliminary = 1,
        .is_broxton = 1,
        .gen = 9,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .num_pipes = 3,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
        GEN_DEFAULT_PIPEOFFSETS,
        IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_kabylake_info = {
        HSW_FEATURES,
        .is_preliminary = 1,
        .is_kabylake = 1,
        .gen = 9,
};

static const struct intel_device_info intel_kabylake_gt3_info = {
        HSW_FEATURES,
        .is_preliminary = 1,
        .is_kabylake = 1,
        .gen = 9,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

/*
 * Make sure any device matches here go from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
        INTEL_I915G_IDS(&intel_i915g_info),
        INTEL_I915GM_IDS(&intel_i915gm_info),
        INTEL_I945G_IDS(&intel_i945g_info),
        INTEL_I945GM_IDS(&intel_i945gm_info),
        INTEL_I965G_IDS(&intel_i965g_info),
        INTEL_G33_IDS(&intel_g33_info),
        INTEL_I965GM_IDS(&intel_i965gm_info),
        INTEL_GM45_IDS(&intel_gm45_info),
        INTEL_G45_IDS(&intel_g45_info),
        INTEL_PINEVIEW_IDS(&intel_pineview_info),
        INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
        INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
        INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
        INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
        INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
        INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
        INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
        INTEL_HSW_D_IDS(&intel_haswell_d_info),
        INTEL_HSW_M_IDS(&intel_haswell_m_info),
        INTEL_VLV_M_IDS(&intel_valleyview_m_info),
        INTEL_VLV_D_IDS(&intel_valleyview_d_info),
        INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
        INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
        INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
        INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
        INTEL_CHV_IDS(&intel_cherryview_info),
        INTEL_SKL_GT1_IDS(&intel_skylake_info),
        INTEL_SKL_GT2_IDS(&intel_skylake_info),
        INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
        INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
        INTEL_BXT_IDS(&intel_broxton_info),
        INTEL_KBL_GT1_IDS(&intel_kabylake_info),
        INTEL_KBL_GT2_IDS(&intel_kabylake_info),
        INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
        INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
        {0, 0, 0}
};
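
/*
 * Illustrative sketch only, not part of the driver: the PCI core walks
 * the table above in order and binds the first matching entry, which is
 * why the subsystem-qualified Quanta entry must precede the generic IVB
 * IDs. A simplified first-match lookup under that assumption:
 */
#if 0   /* example, never compiled */
static const struct intel_device_info *
example_first_match(const struct pci_dev *pdev)
{
        const struct pci_device_id *id;

        for (id = pciidlist; id->vendor; id++) {        /* {0, 0, 0} terminates */
                if (id->vendor != pdev->vendor || id->device != pdev->device)
                        continue;
                /* most entries wildcard the subsystem IDs with PCI_ANY_ID */
                if (id->subvendor != PCI_ANY_ID &&
                    id->subvendor != pdev->subsystem_vendor)
                        continue;
                if (id->subdevice != PCI_ANY_ID &&
                    id->subdevice != pdev->subsystem_device)
                        continue;
                return (const struct intel_device_info *)id->driver_data;
        }
        return NULL;    /* not a device this table handles */
}
#endif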


static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
        enum intel_pch ret = PCH_NOP;

        /*
         * In a virtualized passthrough environment we can be in a
         * setup where the ISA bridge is not able to be passed through.
         * In this case, a south bridge can be emulated and we have to
         * make an educated guess as to which PCH is really there.
         */

        if (IS_GEN5(dev)) {
                ret = PCH_IBX;
                DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
        } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
                ret = PCH_CPT;
                DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                ret = PCH_LPT;
                DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
        } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
                ret = PCH_SPT;
                DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
        }

        return ret;
}

void intel_detect_pch(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct pci_dev *pch = NULL;

        /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
         * (which really amounts to a PCH but no South Display).
         */
        if (INTEL_INFO(dev)->num_pipes == 0) {
                dev_priv->pch_type = PCH_NOP;
                return;
        }

        /*
         * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
         * make graphics device passthrough easy for the VMM, which then
         * only needs to expose the ISA bridge to let the driver know the
         * real hardware underneath. This is a requirement from the
         * virtualization team.
         *
         * In some virtualized environments (e.g. XEN), there is an
         * irrelevant ISA bridge in the system. To work reliably, we should
         * scan through all the ISA bridge devices and check for the first
         * match, instead of only checking the first one.
         */
        while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
                if (pch->vendor == PCI_VENDOR_ID_INTEL) {
                        unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
                        dev_priv->pch_id = id;

                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_IBX;
                                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
                                WARN_ON(!IS_GEN5(dev));
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
                                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
                                WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
                                WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
                                WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev) &&
                                        !IS_KABYLAKE(dev));
                        } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev) &&
                                        !IS_KABYLAKE(dev));
                        } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
                                   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
                                    pch->subsystem_vendor == 0x1af4 &&
                                    pch->subsystem_device == 0x1100)) {
                                dev_priv->pch_type = intel_virt_detect_pch(dev);
                        } else
                                continue;

                        break;
                }
        }
        if (!pch)
                DRM_DEBUG_KMS("No PCH found.\n");

//      pci_dev_put(pch);
}
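
/*
 * Sketch of how the detected type is consumed elsewhere in the driver
 * (assuming the usual HAS_PCH_*() helpers from i915_drv.h, which compare
 * dev_priv->pch_type against the PCH_* values set above), for example:
 *
 *      if (HAS_PCH_CPT(dev))
 *              cpt_program_transcoder(dev);
 *
 * where cpt_program_transcoder() is a made-up name for illustration.
 */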

bool i915_semaphore_is_enabled(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen < 6)
                return false;

        if (i915.semaphores >= 0)
                return i915.semaphores;

        /* TODO: make semaphores and Execlists play nicely together */
        if (i915.enable_execlists)
                return false;

        /* Until we get further testing... */
        if (IS_GEN8(dev))
                return false;

#ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
        if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}
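
/*
 * Usage note (assuming i915.semaphores keeps its upstream default of -1,
 * i.e. "auto"): booting with i915.semaphores=0 or i915.semaphores=1
 * satisfies the "i915.semaphores >= 0" check above and forces the answer,
 * skipping the execlist, GEN8 and IOMMU heuristics that follow it.
 */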

#if 0

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct intel_encoder *encoder;

        drm_modeset_lock_all(dev);
        for_each_intel_encoder(dev, encoder)
                if (encoder->suspend)
                        encoder->suspend(encoder);
        drm_modeset_unlock_all(dev);
}

static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
                              bool rpm_resume);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
        if (acpi_target_system_state() < ACPI_STATE_S3)
                return true;
#endif
        return false;
}
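
/*
 * Interpretation note (assumption from the ACPI sleep-state ordering): any
 * target state shallower than ACPI_STATE_S3 (S0/S1/S2, i.e. freeze /
 * suspend-to-idle) returns true here, which is what makes
 * i915_drm_suspend() below pick PCI_D1 as the opregion target and lets
 * i915_drm_suspend_late() leave the CSR/DMC firmware in charge.
 */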

static int i915_drm_suspend(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        pci_power_t opregion_target_state;
        int error;

        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_SUSPENDED;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        disable_rpm_wakeref_asserts(dev_priv);

        /* We do a lot of poking in a lot of registers, make sure they work
         * properly. */
        intel_display_set_init_power(dev_priv, true);

        drm_kms_helper_poll_disable(dev);

        pci_save_state(dev->pdev);

        error = i915_gem_suspend(dev);
        if (error) {
                dev_err(&dev->pdev->dev,
                        "GEM idle failed, resume might fail\n");
                goto out;
        }

        intel_guc_suspend(dev);

        intel_suspend_gt_powersave(dev);

        intel_display_suspend(dev);

        intel_dp_mst_suspend(dev);

        intel_runtime_pm_disable_interrupts(dev_priv);
        intel_hpd_cancel_work(dev_priv);

        intel_suspend_encoders(dev_priv);

        intel_suspend_hw(dev);

        i915_gem_suspend_gtt_mappings(dev);

        i915_save_state(dev);

        opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
        intel_opregion_notify_adapter(dev, opregion_target_state);

        intel_uncore_forcewake_reset(dev, false);
        intel_opregion_fini(dev);

        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

        dev_priv->suspend_count++;

        intel_display_set_init_power(dev_priv, false);

        if (HAS_CSR(dev_priv))
                flush_work(&dev_priv->csr.work);

out:
        enable_rpm_wakeref_asserts(dev_priv);

        return error;
}

static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        bool fw_csr;
        int ret;

        disable_rpm_wakeref_asserts(dev_priv);

        fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
        /*
         * In case of firmware assisted context save/restore don't manually
         * deinit the power domains. This also means the CSR/DMC firmware will
         * stay active, it will power down any HW resources as required and
         * also enable deeper system power states that would be blocked if the
         * firmware was inactive.
         */
        if (!fw_csr)
                intel_power_domains_suspend(dev_priv);

        ret = intel_suspend_complete(dev_priv);

        if (ret) {
                DRM_ERROR("Suspend complete failed: %d\n", ret);
                if (!fw_csr)
                        intel_power_domains_init_hw(dev_priv, true);

                goto out;
        }

        pci_disable_device(drm_dev->pdev);
        /*
         * During hibernation on some platforms the BIOS may try to access
         * the device even though it's already in D3 and hang the machine. So
         * leave the device in D0 on those platforms and hope the BIOS will
         * power down the device properly. The issue was seen on multiple old
         * GENs with different BIOS vendors, so having an explicit blacklist
         * is impractical; apply the workaround on everything pre GEN6. The
         * platforms where the issue was seen:
         * Lenovo Thinkpad X301, X61s, X60, T60, X41
         * Fujitsu FSC S7110
         * Acer Aspire 1830T
         */
        if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
                pci_set_power_state(drm_dev->pdev, PCI_D3hot);

        dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
        enable_rpm_wakeref_asserts(dev_priv);

        return ret;
}

int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
        int error;

        if (!dev || !dev->dev_private) {
                DRM_ERROR("dev: %p\n", dev);
                DRM_ERROR("DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }

        if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
                         state.event != PM_EVENT_FREEZE))
                return -EINVAL;

        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;

        error = i915_drm_suspend(dev);
        if (error)
                return error;

        return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        disable_rpm_wakeref_asserts(dev_priv);

        mutex_lock(&dev->struct_mutex);
        i915_gem_restore_gtt_mappings(dev);
        mutex_unlock(&dev->struct_mutex);

        i915_restore_state(dev);
        intel_opregion_setup(dev);

        intel_init_pch_refclk(dev);
        drm_mode_config_reset(dev);

        /*
         * Interrupts have to be enabled before any batches are run. If not the
         * GPU will hang. i915_gem_init_hw() will initiate batches to
         * update/restore the context.
         *
         * Modeset enabling in intel_modeset_init_hw() also needs working
         * interrupts.
         */
        intel_runtime_pm_enable_interrupts(dev_priv);

        mutex_lock(&dev->struct_mutex);
        if (i915_gem_init_hw(dev)) {
                DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
                atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
        }
        mutex_unlock(&dev->struct_mutex);

        intel_guc_resume(dev);

        intel_modeset_init_hw(dev);

        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irq(&dev_priv->irq_lock);

        intel_dp_mst_resume(dev);

        intel_display_resume(dev);

        /*
         * ... but also need to make sure that hotplug processing
         * doesn't cause havoc. Like in the driver load code we don't
         * bother with the tiny race here where we might lose hotplug
         * notifications.
         */
        intel_hpd_init(dev_priv);
        /* Config may have changed between suspend and resume */
        drm_helper_hpd_irq_event(dev);

        intel_opregion_init(dev);

        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);

        intel_opregion_notify_adapter(dev, PCI_D0);

        drm_kms_helper_poll_enable(dev);

        enable_rpm_wakeref_asserts(dev_priv);

        return 0;
}
768 | 762 | ||

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(dev->pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(dev->pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);

	if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

out:
	dev_priv->suspended_to_idle = false;

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
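
/*
 * For reference, a minimal sketch of the mirror-image freeze side
 * (i915_drm_suspend_late(), defined earlier in this file): the device is
 * disabled first and only then powered down, preserving the ordering the
 * comments above describe. The D3hot choice for the hibernate case is an
 * assumption of this sketch.
 */
#if 0
	pci_disable_device(dev->pdev);
	pci_set_power_state(dev->pdev, PCI_D3hot);
#endif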

int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}
#endif

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	intel_reset_gt_powersave(dev);

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

//	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
	dev_priv->gpu_error.reload_in_reset = true;

	ret = i915_gem_init_hw(dev);

	dev_priv->gpu_error.reload_in_reset = false;

	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		return ret;
	}

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	if (INTEL_INFO(dev)->gen > 5)
		intel_enable_gt_powersave(dev);

	return 0;
}
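
/*
 * Minimal usage sketch (hypothetical caller, not part of this driver): the
 * reset is expected to be driven from process context once a hang has been
 * detected, e.g. from an error-handling work item.
 */
#if 0
static void i915_handle_hang_example(struct drm_device *dev)
{
	int ret = i915_reset(dev);

	if (ret)
		DRM_ERROR("GPU reset failed: %d\n", ret);
}
#endif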

#if 0
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}

static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}

static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* TODO: when DC5 support is added disable DC5 here. */

	broxton_ddi_phy_uninit(dev);
	broxton_uninit_cdclk(dev);
	bxt_enable_dc9(dev_priv);

	return 0;
}

static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/* TODO: when CSR FW support is added make sure the FW is loaded */

	bxt_disable_dc9(dev_priv);

	/*
	 * TODO: when DC5 support is added enable DC5 here if the CSR FW
	 * is available.
	 */
	broxton_init_cdclk(dev);
	broxton_ddi_phy_init(dev);

	return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->pcbr = I915_READ(VLV_PCBR);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT, 0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC, 0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit, they are not
	 * to be restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
	I915_WRITE(VLV_PCBR, s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
#endif
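
/*
 * The restore path above preserves the bits the s0ix sequence itself owns
 * (VLV_GTLC_ALLOWWAKEREQ, VLV_GFX_CLK_FORCE_ON_BIT) with a read-modify-write.
 * A generic sketch of the idiom, with REG, OWNED_MASK and saved as
 * hypothetical placeholders:
 */
#if 0
	val = I915_READ(REG);
	val &= OWNED_MASK;		/* keep the live control bits */
	val |= saved & ~OWNED_MASK;	/* restore everything else */
	I915_WRITE(REG, val);
#endif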

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}
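
/*
 * Usage sketch (hypothetical helper): Gunit accesses done while the GT may
 * be power gated have to be bracketed by a force-on/force-off pair, exactly
 * as vlv_suspend_complete() below does around the s0ix state save.
 */
#if 0
static int vlv_gunit_access_example(struct drm_i915_private *dev_priv)
{
	int err;

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		return err;

	/* ... Gunit registers can be read/written safely here ... */

	return vlv_force_gfx_clock(dev_priv, false);
}
#endif
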
#if 0
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
		      onoff(wait_for_on),
		      I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  onoff(wait_for_on));

	return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well-on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv->dev))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}

static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);
	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = intel_suspend_complete(dev_priv);
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	intel_uncore_forcewake_reset(dev, false);

	enable_rpm_wakeref_asserts(dev_priv);
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
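
/*
 * Sketch of the guarantee the deadlock comment above relies on: any path
 * touching the hardware holds a runtime-PM reference, so the device cannot
 * runtime suspend while the access is in flight. The wakeref helper names
 * are assumed from elsewhere in this driver:
 */
#if 0
	intel_runtime_pm_get(dev_priv);
	/* ... hardware access; intel_runtime_suspend() cannot run here ... */
	intel_runtime_pm_put(dev_priv);
#endif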

static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

/*
 * This function implements the functionality common to the runtime and
 * the system suspend sequences.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	int ret;

	if (IS_BROXTON(dev_priv))
		ret = bxt_suspend_complete(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}
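
/*
 * Note: upstream, this helper is shared by the system suspend path
 * (i915_drm_suspend_late) and the runtime path (intel_runtime_suspend);
 * the same split is assumed for this port.
 */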

static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
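
/*
 * Illustrative sketch only: on a stock Linux build these dev_pm_ops
 * reach the PM core through the PCI driver definition, roughly as below
 * (field values follow the upstream i915 driver; this port instead
 * registers the device by hand in i915_init() further down).
 */
#if 0
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
#endif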

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
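
/*
 * Note: these vm_ops back mmap()ed GEM objects; i915_gem_fault (above)
 * binds the object into the GTT aperture and inserts the PFNs on first
 * touch, while the drm_gem_vm_* helpers manage the mapping's refcount.
 */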

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
#endif
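
/*
 * Note: the .fops hook in the driver struct below is commented out, so
 * i915_driver_fops above appears to be unused in this port and is
 * presumably kept in step with the upstream table for reference.
 */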

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER | DRIVER_MODESET,
	.load = i915_driver_load,
//	.unload = i915_driver_unload,
	.open = i915_driver_open,
//	.lastclose = i915_driver_lastclose,
//	.preclose = i915_driver_preclose,
//	.postclose = i915_driver_postclose,
//	.set_busid = drm_pci_set_busid,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,

//	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
//	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
//	.gem_prime_export = i915_gem_prime_export,
//	.gem_prime_import = i915_gem_prime_import,

//	.dumb_create = i915_gem_dumb_create,
//	.dumb_map_offset = i915_gem_mmap_gtt,
//	.dumb_destroy = i915_gem_dumb_destroy,
//	.ioctls = i915_ioctls,
//	.fops = &i915_driver_fops,
//	.name = DRIVER_NAME,
//	.desc = DRIVER_DESC,
//	.date = DRIVER_DATE,
//	.major = DRIVER_MAJOR,
//	.minor = DRIVER_MINOR,
//	.patchlevel = DRIVER_PATCHLEVEL,
};
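
/*
 * Port-specific entry point: find the first PCI device matching
 * pciidlist, bring up the DRM core, then hand the device to the common
 * driver code through the legacy drm_get_pci_dev() probe path.
 */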
int i915_init(void)
{
	static pci_dev_t device;
	const struct pci_device_id *ent;
	int err;

	ent = find_pci_device(&device, pciidlist);
	if (unlikely(ent == NULL)) {
		dbgprintf("device not found\n");
		return -ENODEV;
	}

	drm_core_init();

	DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
		 device.pci_dev.device);

	driver.driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;

	err = drm_get_pci_dev(&device.pci_dev, ent, &driver);

	return err;
}
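
/*
 * Note: drm_get_pci_dev() is the legacy DRM PCI probe helper; it
 * allocates and registers the drm_device and then invokes the deprecated
 * driver.load hook (i915_driver_load here), which is why this driver
 * still fills in .load above.
 */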

MODULE_AUTHOR("Tungsten Graphics, Inc.");
MODULE_AUTHOR("Intel Corporation");

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");